//===- InstCombineCompares.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <bitset>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
                            bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);

  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
                            bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);

  return Overflow;
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isZero())
    return ICmpInst::isRelational(Pred);

  if (C.isOne()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnes()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
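/// An illustrative sketch (hypothetical IR, not taken from the sources): given
///   @flags = constant [4 x i8] c"\04\00\04\00"
///   %p = getelementptr inbounds [4 x i8], ptr @flags, i64 0, i64 %i
///   %v = load i8, ptr %p
///   %m = and i8 %v, 4
///   %c = icmp eq i8 %m, 0
/// the comparison is true exactly for elements 1 and 3, so it folds to
///   %c1 = icmp eq i64 %i, 1
///   %c2 = icmp eq i64 %i, 3
///   %c  = or i1 %c1, %c2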
Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
    LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst) {
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
  if (LI->isVolatile() || !GV || !GV->isConstant() ||
      !GV->hasDefinitiveInitializer())
    return nullptr;

  Type *EltTy = LI->getType();
  TypeSize EltSize = DL.getTypeStoreSize(EltTy);
  if (EltSize.isScalable())
    return nullptr;

  // Decompose the load address as BasePtr + Index * Scale + Offset.
  LinearExpression Expr = decomposeLinearExpression(DL, GEP);
  if (!Expr.Index || Expr.BasePtr != GV || Expr.Offset.getBitWidth() > 64)
    return nullptr;

  Constant *Init = GV->getInitializer();
  TypeSize GlobalSize = DL.getTypeAllocSize(Init->getType());

  Value *Idx = Expr.Index;
  const APInt &Stride = Expr.Scale;
  const APInt &ConstOffset = Expr.Offset;

  // Allow an additional context offset, but only within the stride.
  if (!ConstOffset.ult(Stride))
    return nullptr;

  // Don't handle overlapping loads for now.
  if (!Stride.uge(EltSize.getFixedValue()))
    return nullptr;

  // Don't blow up on huge arrays.
  uint64_t ArrayElementCount =
      divideCeil((GlobalSize.getFixedValue() - ConstOffset.getZExtValue()),
                 Stride.getZExtValue());
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  APInt Offset = ConstOffset;
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i, Offset += Stride) {
    Constant *Elt = ConstantFoldLoadFromConst(Init, EltTy, Offset, DL);
    if (!Elt)
      return nullptr;

    // If the element is masked, handle it.
    if (AndCst) {
      Elt = ConstantFoldBinaryOpOperands(Instruction::And, Elt, AndCst, DL);
      if (!Elt)
        return nullptr;
    }

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    if (!C)
      return nullptr;

    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i - 1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i - 1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C))
      return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i - 1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i - 1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.

  // If the inbounds keyword is not present, Idx * Stride can overflow.
  // Let's assume that Stride is 2 and the wanted value is at offset 0.
  // Then, there are two possible values for Idx to match offset 0:
  // 0x00..00, 0x80..00.
  // Emitting 'icmp eq Idx, 0' isn't correct in this case because the
  // comparison is false if Idx was 0x80..00.
  // We need to erase the highest countTrailingZeros(ElementSize) bits of Idx.
  auto MaskIdx = [&](Value *Idx) {
    if (!Expr.Flags.isInBounds() && Stride.countr_zero() != 0) {
      Value *Mask = Constant::getAllOnesValue(Idx->getType());
      Mask = Builder.CreateLShr(Mask, Stride.countr_zero());
      Idx = Builder.CreateAnd(Idx, Mask);
    }
    return Idx;
  };

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    Idx = MaskIdx(Idx);
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    Idx = MaskIdx(Idx);
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx =
        ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
    Idx = MaskIdx(Idx);

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::getSigned(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End =
        ConstantInt::get(Idx->getType(), TrueRangeEnd - FirstTrueElement + 1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    Idx = MaskIdx(Idx);
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::getSigned(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End =
        ConstantInt::get(Idx->getType(), FalseRangeEnd - FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
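  // For example (illustrative): if the comparison is true exactly for
  // elements 0, 2, 3 and 5, then magic_cst is 0b101101 (0x2D) and the
  // result is computed as ((0x2D >> i) & 1) != 0.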
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Idx = MaskIdx(Idx);
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.contains(V)) {
        WorkList.pop_back();
        continue;
      }
      if (!isa<PHINode>(V) && !isa<GEPOperator>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // Only allow inbounds GEPs with at most one variable offset.
        auto IsNonConst = [](Value *V) { return !isa<ConstantInt>(V); };
        if (!GEP->isInBounds() || count_if(GEP->indices(), IsNonConst) > 1)
          return false;

        NW = NW.intersectForOffsetAdd(GEP->getNoWrapFlags());
        if (!Explored.contains(GEP->getOperand(0)))
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (!Explored.contains(Op))
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          !Explored.contains(PHI))
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    BasicBlock *Parent = PHI->getParent();
    Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored,
                                 InstCombiner &IC) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] =
          PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                          PHI->getName() + ".idx", PHI->getIterator());
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {
    if (NewInsts.contains(Val))
      continue;

    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      setInsertionPoint(Builder, GEP);
      Value *Op = NewInsts[GEP->getOperand(0)];
      Value *OffsetV = emitGEPOffset(&Builder, DL, GEP);
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = OffsetV;
      else
        NewInsts[GEP] = Builder.CreateAdd(
            Op, OffsetV, GEP->getOperand(0)->getName() + ".add",
            /*NUW=*/NW.hasNoUnsignedWrap(),
            /*NSW=*/NW.hasNoUnsignedSignedWrap());
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        auto It = NewInsts.find(NewIncoming);
        if (It != NewInsts.end())
          NewIncoming = It->second;

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    setInsertionPoint(Builder, Val, false);
    // Create GEP for external users.
    Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(), Base, NewInsts[Val],
                                      Val->getName() + ".ptr", NW);
    IC.replaceInstUsesWith(*cast<Instruction>(Val), NewVal);
    // Add old instruction to worklist for DCE. We don't directly remove it
    // here because the original compare is one of the users.
    IC.addToWorklist(cast<Instruction>(Val));
  }

  return NewInsts[Start];
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
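/// For example (an illustrative sketch):
///   %p = phi ptr [ %base, %entry ], [ %p.next, %latch ]
///   %p.next = getelementptr inbounds i8, ptr %p, i64 1
///   %end = getelementptr inbounds i8, ptr %base, i64 4
///   %cmp = icmp eq ptr %end, %p
/// can be rewritten in terms of an integer index:
///   %idx = phi i64 [ 0, %entry ], [ %idx.next, %latch ]
///   %idx.next = add i64 %idx, 1
///   %cmp = icmp eq i64 4, %idx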
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              CmpPredicate Cond,
                                              const DataLayout &DL,
                                              InstCombiner &IC) {
  // FIXME: Support vector of pointers.
  if (GEPLHS->getType()->isVectorTy())
    return nullptr;

  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  APInt Offset(DL.getIndexTypeSizeInBits(GEPLHS->getType()), 0);
  Value *PtrBase = GEPLHS->stripAndAccumulateConstantOffsets(
      DL, Offset, /*AllowNonInbounds*/ false);

  // Bail if we looked through an addrspacecast.
  if (PtrBase->getType() != GEPLHS->getType())
    return nullptr;

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;
  GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags();
  if (!canRewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes, IC);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
                      IC.Builder.getInt(Offset), NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                           CmpPredicate Cond, Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  auto CanFold = [Cond](GEPNoWrapFlags NW) {
    if (ICmpInst::isEquality(Cond))
      return true;

    // Unsigned predicates can be folded if the GEPs have *any* nowrap flags.
    assert(ICmpInst::isUnsigned(Cond));
    return NW != GEPNoWrapFlags::none();
  };

  auto NewICmp = [Cond](GEPNoWrapFlags NW, Value *Op1, Value *Op2) {
    if (!NW.hasNoUnsignedWrap()) {
      // Convert to a signed comparison.
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Op1, Op2);
    }

    auto *I = new ICmpInst(Cond, Op1, Op2);
    I->setSameSign(NW.hasNoUnsignedSignedWrap());
    return I;
  };

  CommonPointerBase Base = CommonPointerBase::compute(GEPLHS, RHS);
  if (Base.Ptr == RHS && CanFold(Base.LHSNW) && !Base.isExpensive()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    Type *IdxTy = DL.getIndexType(GEPLHS->getType());
    Value *Offset =
        EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEPs=*/true);
    return NewICmp(Base.LHSNW, Offset,
                   Constant::getNullValue(Offset->getType()));
  }

  if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
      isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
      !NullPointerIsDefined(I.getFunction(),
                            RHS->getType()->getPointerAddressSpace())) {
    // For most address spaces, an allocation can't be placed at null, but null
    // itself is treated as a 0 size allocation in the in bounds rules. Thus,
    // the only valid inbounds address derived from null, is null itself.
    // Thus, we have four cases to consider:
    // 1) Base == nullptr, Offset == 0 -> inbounds, null
    // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
    // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
    // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
    //
    // (Note if we're indexing a type of size 0, that simply collapses into one
    // of the buckets above.)
    //
    // In general, we're allowed to make values less poison (i.e. remove
    // sources of full UB), so in this case, we just select between the two
    // non-poison cases (1 and 4 above).
    //
    // For vectors, we apply the same reasoning on a per-lane basis.
    auto *Base = GEPLHS->getPointerOperand();
    if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
      auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
      Base = Builder.CreateVectorSplat(EC, Base);
    }
    return new ICmpInst(Cond, Base,
                        ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                            cast<Constant>(RHS), Base->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags() & GEPRHS->getNoWrapFlags();

    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (GEPLHS->getOperand(0) != GEPRHS->getOperand(0)) {
      bool IndicesTheSame =
          GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
          GEPLHS->getPointerOperand()->getType() ==
              GEPRHS->getPointerOperand()->getType() &&
          GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      Type *BaseType = GEPLHS->getOperand(0)->getType();
      if (IndicesTheSame &&
          CmpInst::makeCmpResultType(BaseType) == I.getType() && CanFold(NW))
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      // FIXME: Support vector of pointers.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          GEPLHS->getOperand(0)->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts() &&
          !GEPLHS->getType()->isVectorTy()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits().getFixedValue() <
              RHSIndexTy->getPrimitiveSizeInBits().getFixedValue()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }
    }

    if (GEPLHS->getOperand(0) == GEPRHS->getOperand(0) &&
        GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
        GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0; // Keep track of # differences.
      unsigned DiffOperand = 0;    // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          Type *LHSType = GEPLHS->getOperand(i)->getType();
          Type *RHSType = GEPRHS->getOperand(i)->getType();
          // FIXME: Better support for vector of pointers.
          if (LHSType->getPrimitiveSizeInBits() !=
                  RHSType->getPrimitiveSizeInBits() ||
              (GEPLHS->getType()->isVectorTy() &&
               (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          }

          if (NumDifferences++)
            break;
          DiffOperand = i;
        }

      if (NumDifferences == 0) // SAME GEP?
        return replaceInstUsesWith(
            I, // No comparison is needed here.
            ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
      // If two GEPs only differ by an index, compare them.
      // Note that nowrap flags are always needed when comparing two indices.
      else if (NumDifferences == 1 && NW != GEPNoWrapFlags::none()) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        return NewICmp(NW, LHSV, RHSV);
      }
    }

    if (Base.Ptr && CanFold(Base.LHSNW & Base.RHSNW) && !Base.isExpensive()) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
      Type *IdxTy = DL.getIndexType(GEPLHS->getType());
      Value *L =
          EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEP=*/true);
      Value *R =
          EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, /*RewriteGEP=*/true);
      return NewICmp(Base.LHSNW & Base.RHSNW, L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts as
  // a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL, *this);
}

bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) {
  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // However, we need to ensure that this folding is consistent: We can't fold
  // one comparison to false, and then leave a different comparison against the
  // same value alone (as it might evaluate to true at runtime, leading to a
  // contradiction). As such, this code ensures that all comparisons are folded
  // at the same time, and there are no other escapes.
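  //
  // For example (illustrative): if %a = alloca i32 does not escape, then
  //   %c = icmp eq ptr %a, %arg
  // can be folded to false, provided every other equality compare of %a is
  // folded consistently at the same time.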

  struct CmpCaptureTracker : public CaptureTracker {
    AllocaInst *Alloca;
    bool Captured = false;
    /// The value of the map is a bit mask of which icmp operands the alloca is
    /// used in.
    SmallMapVector<ICmpInst *, unsigned, 4> ICmps;

    CmpCaptureTracker(AllocaInst *Alloca) : Alloca(Alloca) {}

    void tooManyUses() override { Captured = true; }

    Action captured(const Use *U, UseCaptureInfo CI) override {
      // TODO(captures): Use UseCaptureInfo.
      auto *ICmp = dyn_cast<ICmpInst>(U->getUser());
      // We need to check that U is based *only* on the alloca, and doesn't
      // have other contributions from a select/phi operand.
      // TODO: We could check whether getUnderlyingObjects() reduces to one
      // object, which would allow looking through phi nodes.
      if (ICmp && ICmp->isEquality() && getUnderlyingObject(*U) == Alloca) {
        // Collect equality icmps of the alloca, and don't treat them as
        // captures.
        ICmps[ICmp] |= 1u << U->getOperandNo();
        return Continue;
      }

      Captured = true;
      return Stop;
    }
  };

  CmpCaptureTracker Tracker(Alloca);
  PointerMayBeCaptured(Alloca, &Tracker);
  if (Tracker.Captured)
    return false;

  bool Changed = false;
  for (auto [ICmp, Operands] : Tracker.ICmps) {
    switch (Operands) {
    case 1:
    case 2: {
      // The alloca is only used in one icmp operand. Assume that the
      // equality is false.
      auto *Res = ConstantInt::get(ICmp->getType(),
                                   ICmp->getPredicate() == ICmpInst::ICMP_NE);
      replaceInstUsesWith(*ICmp, Res);
      eraseInstFromFunction(*ICmp);
      Changed = true;
      break;
    }
    case 3:
      // Both icmp operands are based on the alloca, so this is comparing
      // pointer offsets, without leaking any information about the address
      // of the alloca. Ignore such comparisons.
      break;
    default:
      llvm_unreachable("Cannot happen");
    }
  }

  return Changed;
}

906/// Fold "icmp pred (X+C), X".
908 CmpPredicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.
  assert(!!C && "C should not be zero!");

  // (X+1) <u X       --> X >u (MAXUINT-1)       --> X == 255
  // (X+2) <u X       --> X >u (MAXUINT-2)       --> X > 253
  // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Constant *R =
        ConstantInt::get(X->getType(), APInt::getMaxValue(C.getBitWidth()) - C);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X       --> X <u (0-1)       --> X != 255
  // (X+2) >u X       --> X <u (0-2)       --> X <u 254
  // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X,
                        ConstantInt::get(X->getType(), -C));

  APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());

  // (X+ 1) <s X      --> X >s (MAXSINT-1)       --> X == 127
  // (X+ 2) <s X      --> X >s (MAXSINT-2)       --> X >s 125
  // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
  // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
  // (X+ -2) <s X     --> X >s (MAXSINT- -2)     --> X >s 126
  // (X+ -1) <s X     --> X >s (MAXSINT- -1)     --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X,
                        ConstantInt::get(X->getType(), SMax - C));

  // (X+ 1) >s X      --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X      --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X     --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X     --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  return new ICmpInst(ICmpInst::ICMP_SLT, X,
                      ConstantInt::get(X->getType(), SMax - (C - 1)));
}

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
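/// For example (illustrative): (icmp eq (lshr i32 64, %a), 8) becomes
/// (icmp eq i32 %a, 3), because 64 >> 3 == 8.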
Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                     const APInt &AP1,
                                                     const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isZero())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnes())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countl_one() - AP2.countl_one();
  else
    Shift = AP1.countl_zero() - AP2.countl_zero();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnes() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
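/// For example (illustrative): (icmp eq (shl i32 4, %a), 32) becomes
/// (icmp eq i32 %a, 3), because 4 << 3 == 32.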
Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                     const APInt &AP1,
                                                     const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isZero())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countr_zero();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countr_zero() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
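///
/// An illustrative sketch (hypothetical IR; %a and %b must have enough sign
/// bits to fit in i8):
///   %sum = add i64 %a, %b
///   %chk = add i64 %sum, 128
///   %ovf = icmp ugt i64 %chk, 255
/// becomes
///   %s   = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %ta, i8 %tb)
///   %ovf = extractvalue { i8, i1 } %s, 1
/// where %ta and %tb are %a and %b truncated to i8.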
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombinerImpl &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countr_zero();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  if (IC.ComputeMaxSignificantBits(A, &I) > NewWidth ||
      IC.ComputeMaxSignificantBits(B, &I) > NewWidth)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Function *F = Intrinsic::getOrInsertDeclaration(
      I.getModule(), Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy &Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder.SetInsertPoint(OrigAdd);

  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);
  IC.eraseInstFromFunction(*OrigAdd);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}

/// If we have:
///   icmp eq/ne (urem/srem %x, %y), 0
/// iff %y is a power-of-two, we can replace this with a bit test:
///   icmp eq/ne (and %x, (add %y, -1)), 0
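/// For example (illustrative), when %y is known to be 8:
///   (icmp eq (urem i32 %x, 8), 0) -> (icmp eq (and i32 %x, 7), 0)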
Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
  // This fold is only valid for equality predicates.
  if (!I.isEquality())
    return nullptr;
  CmpPredicate Pred;
  Value *X, *Y, *Zero;
  if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
                        m_CombineAnd(m_Zero(), m_Value(Zero)))))
    return nullptr;
  if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
    return nullptr;
  // This may increase instruction count, we don't enforce that Y is a constant.
  Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
  Value *Masked = Builder.CreateAnd(X, Mask);
  return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
}

/// Fold equality-comparison between zero and any (maybe truncated) right-shift
/// by one-less-than-bitwidth into a sign test on the original value.
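/// For example (illustrative):
///   (icmp ne (lshr i32 %x, 31), 0) -> (icmp slt i32 %x, 0)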
Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
  Instruction *Val;
  CmpPredicate Pred;
  if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
    return nullptr;

  Value *X;
  Type *XTy;

  Constant *C;
  if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
    XTy = X->getType();
    unsigned XBitWidth = XTy->getScalarSizeInBits();
    if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                                     APInt(XBitWidth, XBitWidth - 1))))
      return nullptr;
  } else if (isa<BinaryOperator>(Val) &&
             (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
                  cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
                  /*AnalyzeForSignBitExtraction=*/true))) {
    XTy = X->getType();
  } else
    return nullptr;

  return ICmpInst::Create(Instruction::ICmp,
                          Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
                                                    : ICmpInst::ICMP_SLT,
                          X, ConstantInt::getNullValue(XTy));
}

// Handle icmp pred X, 0
Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  if (!match(Cmp.getOperand(1), m_Zero()))
    return nullptr;

  // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
  if (Pred == ICmpInst::ICMP_SGT) {
    Value *A, *B;
    if (match(Cmp.getOperand(0), m_SMin(m_Value(A), m_Value(B)))) {
      if (isKnownPositive(A, SQ.getWithInstruction(&Cmp)))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, SQ.getWithInstruction(&Cmp)))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
    return New;

  // Given:
  //   icmp eq/ne (urem %x, %y), 0
  // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
  //   icmp eq/ne %x, 0
  Value *X, *Y;
  if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
      ICmpInst::isEquality(Pred)) {
    KnownBits XKnown = computeKnownBits(X, &Cmp);
    KnownBits YKnown = computeKnownBits(Y, &Cmp);
    if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
      return new ICmpInst(Pred, X, Cmp.getOperand(1));
  }

  // (icmp eq/ne (mul X Y)) -> (icmp eq/ne X/Y) if we know about whether X/Y are
  // odd/non-zero/there is no overflow.
  if (match(Cmp.getOperand(0), m_Mul(m_Value(X), m_Value(Y))) &&
      ICmpInst::isEquality(Pred)) {

    KnownBits XKnown = computeKnownBits(X, &Cmp);
    // if X % 2 != 0
    //   (icmp eq/ne Y)
    if (XKnown.countMaxTrailingZeros() == 0)
      return new ICmpInst(Pred, Y, Cmp.getOperand(1));

    KnownBits YKnown = computeKnownBits(Y, &Cmp);
    // if Y % 2 != 0
    //   (icmp eq/ne X)
    if (YKnown.countMaxTrailingZeros() == 0)
      return new ICmpInst(Pred, X, Cmp.getOperand(1));

    auto *BO0 = cast<OverflowingBinaryOperator>(Cmp.getOperand(0));
    if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
      const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
      // `isKnownNonZero` does more analysis than just `!KnownBits.One.isZero()`
      // but to avoid unnecessary work, first just check if this is an obvious
      // case.

      // if X non-zero and NoOverflow(X * Y)
      //   (icmp eq/ne Y)
      if (!XKnown.One.isZero() || isKnownNonZero(X, Q))
        return new ICmpInst(Pred, Y, Cmp.getOperand(1));

      // if Y non-zero and NoOverflow(X * Y)
      //   (icmp eq/ne X)
      if (!YKnown.One.isZero() || isKnownNonZero(Y, Q))
        return new ICmpInst(Pred, X, Cmp.getOperand(1));
    }
    // Note, we are skipping cases:
    //   if Y % 2 != 0 AND X % 2 != 0
    //     (false/true)
    //   if X non-zero and Y non-zero and NoOverflow(X * Y)
    //     (false/true)
    // Those can be simplified later as we would have already replaced the (icmp
    // eq/ne (mul X, Y)) with (icmp eq/ne X/Y) and if X/Y is known non-zero that
    // will fold to a constant elsewhere.
  }

  // (icmp eq/ne f(X), 0) -> (icmp eq/ne X, 0)
  // where f(X) == 0 if and only if X == 0
  if (ICmpInst::isEquality(Pred))
    if (Value *Stripped = stripNullTest(Cmp.getOperand(0)))
      return new ICmpInst(Pred, Stripped,
                          Constant::getNullValue(Stripped->getType()));

  return nullptr;
}

/// Fold icmp eq (num + mask) & ~mask, num
/// to
/// icmp eq (and num, mask), 0
/// Where mask is a low bit mask.
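/// For example (illustrative), with mask == 7:
///   (icmp eq (and (add %num, 7), -8), %num) -> (icmp eq (and %num, 7), 0)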
static Instruction *foldICmpWithAddMask(ICmpInst &Cmp,
                                        InstCombiner::BuilderTy &Builder) {
  Value *Num;
  CmpPredicate Pred;
  const APInt *Mask, *Neg;

  if (!match(&Cmp,
             m_c_ICmp(Pred, m_Value(Num),
                      m_OneUse(m_And(m_OneUse(m_c_Add(m_Deferred(Num),
                                                      m_LowBitMask(Mask))),
                                     m_APInt(Neg))))))
    return nullptr;

  if (*Neg != ~*Mask)
    return nullptr;

  if (!ICmpInst::isEquality(Pred))
    return nullptr;

  // Create new icmp eq (num & mask), 0
  auto *NewAnd = Builder.CreateAnd(Num, *Mask);
  auto *Zero = Constant::getNullValue(Num->getType());

  return new ICmpInst(Pred, NewAnd, Zero);
}

/// Fold icmp Pred X, C.
/// TODO: This code structure does not make sense. The saturating add fold
/// should be moved to some other helper and extended as noted below (it is also
/// possible that code has been made unnecessary - do we canonicalize IR to
/// overflow/saturating intrinsics or not?).
Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in wider type and explicitly checks for overflow using comparisons against
  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
  Value *A, *B;
  ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
  if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
      match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
    if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
      return Res;

  // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
  Constant *C = dyn_cast<Constant>(Op1);
  if (!C)
    return nullptr;

  if (auto *Phi = dyn_cast<PHINode>(Op0))
    if (all_of(Phi->operands(), IsaPred<Constant>)) {
      SmallVector<Constant *> Ops;
      for (Value *V : Phi->incoming_values()) {
        Constant *Res =
            ConstantFoldCompareInstOperands(Pred, cast<Constant>(V), C, DL);
        if (!Res)
          return nullptr;
        Ops.push_back(Res);
      }
      Builder.SetInsertPoint(Phi);
      PHINode *NewPhi = Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
      for (auto [V, Pred] : zip(Ops, Phi->blocks()))
        NewPhi->addIncoming(V, Pred);
      return replaceInstUsesWith(Cmp, NewPhi);
    }

  if (Instruction *R = tryFoldInstWithCtpopWithNot(&Cmp))
    return R;

  return nullptr;
}

/// Canonicalize icmp instructions based on dominating conditions.
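/// For example (illustrative): if the true edge of "icmp ult i8 %x, 2"
/// dominates "icmp ne i8 %x, 0", the intersection of the two ranges is the
/// single value 1, so the dominated compare becomes "icmp eq i8 %x, 1".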
Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
  // We already checked simple implication in InstSimplify, only handle complex
  // cases here.
  Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
  const APInt *C;
  if (!match(Y, m_APInt(C)))
    return nullptr;

  CmpInst::Predicate Pred = Cmp.getPredicate();
  ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *C);

  auto handleDomCond = [&](ICmpInst::Predicate DomPred,
                           const APInt *DomC) -> Instruction * {
    // We have 2 compares of a variable with constants. Calculate the constant
    // ranges of those compares to see if we can transform the 2nd compare:
    // DomBB:
    //   DomCond = icmp DomPred X, DomC
    //   br DomCond, CmpBB, FalseBB
    // CmpBB:
    //   Cmp = icmp Pred X, C
    ConstantRange DominatingCR =
        ConstantRange::makeExactICmpRegion(DomPred, *DomC);
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getTrue());

    // Canonicalizing a sign bit comparison that gets used in a branch,
    // pessimizes codegen by generating branch on zero instruction instead
    // of a test and branch. So we avoid canonicalizing in such situations
    // because test and branch instruction has better branch displacement
    // than compare and branch instruction.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
      return nullptr;

    // Avoid an infinite loop with min/max canonicalization.
    // TODO: This will be unnecessary if we canonicalize to min/max intrinsics.
    if (Cmp.hasOneUse() &&
        match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value())))
      return nullptr;

    if (const APInt *EqC = Intersection.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
    if (const APInt *NeC = Difference.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
    return nullptr;
  };

  for (BranchInst *BI : DC.conditionsFor(X)) {
    CmpPredicate DomPred;
    const APInt *DomC;
    if (!match(BI->getCondition(),
               m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))))
      continue;

    BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
    if (DT.dominates(Edge0, Cmp.getParent())) {
      if (auto *V = handleDomCond(DomPred, DomC))
        return V;
    } else {
      BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
      if (DT.dominates(Edge1, Cmp.getParent()))
        if (auto *V =
                handleDomCond(CmpInst::getInversePredicate(DomPred), DomC))
          return V;
    }
  }

  return nullptr;
}

/// Fold icmp (trunc X), C.
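/// For example (illustrative):
///   (icmp ult (trunc nuw i32 %x to i8), 5) -> (icmp ult i32 %x, 5)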
Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
                                                     TruncInst *Trunc,
                                                     const APInt &C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  Type *SrcTy = X->getType();
  unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
           SrcBits = SrcTy->getScalarSizeInBits();

  // Match (icmp pred (trunc nuw/nsw X), C)
  // Which we can convert to (icmp pred X, (sext/zext C))
  if (shouldChangeType(Trunc->getType(), SrcTy)) {
    if (Trunc->hasNoSignedWrap())
      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.sext(SrcBits)));
    if (!Cmp.isSigned() && Trunc->hasNoUnsignedWrap())
      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.zext(SrcBits)));
  }

  if (C.isOne() && C.getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  // TODO: Handle non-equality predicates.
  Value *Y;
  const APInt *Pow2;
  if (Cmp.isEquality() && match(X, m_Shl(m_Power2(Pow2), m_Value(Y))) &&
      DstBits > Pow2->logBase2()) {
    // (trunc (Pow2 << Y) to iN) == 0 --> Y u>= N - log2(Pow2)
    // (trunc (Pow2 << Y) to iN) != 0 --> Y u< N - log2(Pow2)
    // iff N > log2(Pow2)
    if (C.isZero()) {
      auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
      return new ICmpInst(NewPred, Y,
                          ConstantInt::get(SrcTy, DstBits - Pow2->logBase2()));
    }
    // (trunc (Pow2 << Y) to iN) == 2**C --> Y == C - log2(Pow2)
    // (trunc (Pow2 << Y) to iN) != 2**C --> Y != C - log2(Pow2)
    if (C.isPowerOf2())
      return new ICmpInst(
          Pred, Y, ConstantInt::get(SrcTy, C.logBase2() - Pow2->logBase2()));
  }

  if (Cmp.isEquality() && (Trunc->hasOneUse() || Trunc->hasNoUnsignedWrap())) {
    // Canonicalize to a mask and wider compare if the wide type is suitable:
    // (trunc X to i8) == C --> (X & 0xff) == (zext C)
    if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
      Constant *Mask =
          ConstantInt::get(SrcTy, APInt::getLowBitsSet(SrcBits, DstBits));
      Value *And = Trunc->hasNoUnsignedWrap() ? X : Builder.CreateAnd(X, Mask);
      Constant *WideC = ConstantInt::get(SrcTy, C.zext(SrcBits));
      return new ICmpInst(Pred, And, WideC);
    }

    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    KnownBits Known = computeKnownBits(X, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countl_one() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C.zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, NewRHS));
    }
  }

  // Look through truncated right-shift of the sign-bit for a sign-bit check:
  // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0  --> ShOp <  0
  // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1
  Value *ShOp;
  uint64_t ShAmt;
  bool TrueIfSigned;
  if (isSignBitCheck(Pred, C, TrueIfSigned) &&
      match(X, m_Shr(m_Value(ShOp), m_ConstantInt(ShAmt))) &&
      DstBits == SrcBits - ShAmt) {
    return TrueIfSigned ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp,
                                       ConstantInt::getNullValue(
                                           ShOp->getType()))
                        : new ICmpInst(ICmpInst::ICMP_SGT, ShOp,
                                       ConstantInt::getAllOnesValue(
                                           ShOp->getType()));
  }

  return nullptr;
}

/// Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
/// Fold icmp (trunc nuw/nsw X), (zext/sext Y).
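/// For example (illustrative):
///   (icmp ult (trunc nuw i64 %x to i32), (zext i8 %y to i32))
/// becomes
///   (icmp ult i64 %x, (zext i8 %y to i64))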
Instruction *
InstCombinerImpl::foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
                                              const SimplifyQuery &Q) {
  Value *X, *Y;
  CmpPredicate Pred;
  bool YIsSExt = false;
  // Try to match icmp (trunc X), (trunc Y)
  if (match(&Cmp, m_ICmp(Pred, m_Trunc(m_Value(X)), m_Trunc(m_Value(Y))))) {
    unsigned NoWrapFlags = cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
                           cast<TruncInst>(Cmp.getOperand(1))->getNoWrapKind();
    if (Cmp.isSigned()) {
      // For signed comparisons, both truncs must be nsw.
      if (!(NoWrapFlags & TruncInst::NoSignedWrap))
        return nullptr;
    } else {
      // For unsigned and equality comparisons, either both must be nuw or
      // both must be nsw, we don't care which.
      if (!NoWrapFlags)
        return nullptr;
    }

    if (X->getType() != Y->getType() &&
        (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
      return nullptr;
    if (!isDesirableIntType(X->getType()->getScalarSizeInBits()) &&
        isDesirableIntType(Y->getType()->getScalarSizeInBits())) {
      std::swap(X, Y);
      Pred = Cmp.getSwappedPredicate(Pred);
    }
    YIsSExt = !(NoWrapFlags & TruncInst::NoUnsignedWrap);
  }
  // Try to match icmp (trunc nuw X), (zext Y)
  else if (!Cmp.isSigned() &&
           match(&Cmp, m_c_ICmp(Pred, m_NUWTrunc(m_Value(X)),
                                m_OneUse(m_ZExt(m_Value(Y)))))) {
    // Can fold trunc nuw + zext for unsigned and equality predicates.
  }
  // Try to match icmp (trunc nsw X), (sext Y)
1570 else if (match(&Cmp, m_c_ICmp(Pred, m_NSWTrunc(m_Value(X)),
1571 m_OneUse(m_ZExtOrSExt(m_Value(Y)))))) {
1572 // Can fold trunc nsw + zext/sext for all predicates.
1573 YIsSExt =
1574 isa<SExtInst>(Cmp.getOperand(0)) || isa<SExtInst>(Cmp.getOperand(1));
1575 } else
1576 return nullptr;
1577
1578 Type *TruncTy = Cmp.getOperand(0)->getType();
1579 unsigned TruncBits = TruncTy->getScalarSizeInBits();
1580
1581 // If this transform will end up changing from desirable types -> undesirable
1582 // types skip it.
1583 if (isDesirableIntType(TruncBits) &&
1584 !isDesirableIntType(X->getType()->getScalarSizeInBits()))
1585 return nullptr;
1586
1587 Value *NewY = Builder.CreateIntCast(Y, X->getType(), YIsSExt);
1588 return new ICmpInst(Pred, X, NewY);
1589}
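// Illustrative IR for the trunc/trunc case above (a sketch; %x and %y are
// placeholders, and i32 is assumed to be a desirable width). With matching
// nuw flags, the narrow unsigned compare moves to the wide sources, and no
// cast is needed because X and Y already have the same type:
//   %a = trunc nuw i32 %x to i16
//   %b = trunc nuw i32 %y to i16
//   %c = icmp ult i16 %a, %b
// becomes:
//   %c = icmp ult i32 %x, %y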
1590
1591/// Fold icmp (xor X, Y), C.
1592Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1593 BinaryOperator *Xor,
1594 const APInt &C) {
1595 if (Instruction *I = foldICmpXorShiftConst(Cmp, Xor, C))
1596 return I;
1597
1598 Value *X = Xor->getOperand(0);
1599 Value *Y = Xor->getOperand(1);
1600 const APInt *XorC;
1601 if (!match(Y, m_APInt(XorC)))
1602 return nullptr;
1603
1604 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
1605 // fold the xor.
1606 ICmpInst::Predicate Pred = Cmp.getPredicate();
1607 bool TrueIfSigned = false;
1608 if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1609
1610 // If the sign bit of the XorCst is not set, there is no change to
1611 // the operation, just stop using the Xor.
1612 if (!XorC->isNegative())
1613 return replaceOperand(Cmp, 0, X);
1614
1615 // Emit the opposite comparison.
1616 if (TrueIfSigned)
1617 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1618 ConstantInt::getAllOnesValue(X->getType()));
1619 else
1620 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1621 ConstantInt::getNullValue(X->getType()));
1622 }
1623
1624 if (Xor->hasOneUse()) {
1625 // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1626 if (!Cmp.isEquality() && XorC->isSignMask()) {
1627 Pred = Cmp.getFlippedSignednessPredicate();
1628 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1629 }
1630
1631 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1632 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1633 Pred = Cmp.getFlippedSignednessPredicate();
1634 Pred = Cmp.getSwappedPredicate(Pred);
1635 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1636 }
1637 }
1638
1639 // Mask constant magic can eliminate an 'xor' with unsigned compares.
1640 if (Pred == ICmpInst::ICMP_UGT) {
1641 // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1642 if (*XorC == ~C && (C + 1).isPowerOf2())
1643 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1644 // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1645 if (*XorC == C && (C + 1).isPowerOf2())
1646 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1647 }
1648 if (Pred == ICmpInst::ICMP_ULT) {
1649 // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1650 if (*XorC == -C && C.isPowerOf2())
1651 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1652 ConstantInt::get(X->getType(), ~C));
1653 // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1654 if (*XorC == C && (-C).isPowerOf2())
1655 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1656 ConstantInt::get(X->getType(), ~C));
1657 }
1658 return nullptr;
1659}
1660
1661/// For power-of-2 C:
1662/// ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1)
1663/// ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1)
1664Instruction *InstCombinerImpl::foldICmpXorShiftConst(ICmpInst &Cmp,
1665 BinaryOperator *Xor,
1666 const APInt &C) {
1667 CmpInst::Predicate Pred = Cmp.getPredicate();
1668 APInt PowerOf2;
1669 if (Pred == ICmpInst::ICMP_ULT)
1670 PowerOf2 = C;
1671 else if (Pred == ICmpInst::ICMP_UGT && !C.isMaxValue())
1672 PowerOf2 = C + 1;
1673 else
1674 return nullptr;
1675 if (!PowerOf2.isPowerOf2())
1676 return nullptr;
1677 Value *X;
1678 const APInt *ShiftC;
1679 if (!match(Xor, m_OneUse(m_c_Xor(m_Value(X),
1680 m_AShr(m_Deferred(X), m_APInt(ShiftC))))))
1681 return nullptr;
1682 uint64_t Shift = ShiftC->getLimitedValue();
1683 Type *XType = X->getType();
1684 if (Shift == 0 || PowerOf2.isMinSignedValue())
1685 return nullptr;
1686 Value *Add = Builder.CreateAdd(X, ConstantInt::get(XType, PowerOf2));
1687 APInt Bound =
1688 Pred == ICmpInst::ICMP_ULT ? PowerOf2 << 1 : ((PowerOf2 << 1) - 1);
1689 return new ICmpInst(Pred, Add, ConstantInt::get(XType, Bound));
1690}
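// Illustrative IR (a sketch; %x is a placeholder). With ShiftC == 7 and
// C == 16 on i8, the unsigned range check on the sign-folded value
// collapses to a single add+compare:
//   %s = ashr i8 %x, 7
//   %a = xor i8 %s, %x
//   %c = icmp ult i8 %a, 16      ; true for %x in [-16, 15]
// becomes:
//   %t = add i8 %x, 16
//   %c = icmp ult i8 %t, 32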
1691
1692/// Fold icmp (and (sh X, Y), C2), C1.
1693Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1694 BinaryOperator *And,
1695 const APInt &C1,
1696 const APInt &C2) {
1697 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1698 if (!Shift || !Shift->isShift())
1699 return nullptr;
1700
1701 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1702 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1703 // code produced by the clang front-end, for bitfield access.
1704 // This seemingly simple opportunity to fold away a shift turns out to be
1705 // rather complicated. See PR17827 for details.
1706 unsigned ShiftOpcode = Shift->getOpcode();
1707 bool IsShl = ShiftOpcode == Instruction::Shl;
1708 const APInt *C3;
1709 if (match(Shift->getOperand(1), m_APInt(C3))) {
1710 APInt NewAndCst, NewCmpCst;
1711 bool AnyCmpCstBitsShiftedOut;
1712 if (ShiftOpcode == Instruction::Shl) {
1713 // For a left shift, we can fold if the comparison is not signed. We can
1714 // also fold a signed comparison if the mask value and comparison value
1715 // are not negative. These constraints may not be obvious, but we can
1716 // prove that they are correct using an SMT solver.
1717 if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1718 return nullptr;
1719
1720 NewCmpCst = C1.lshr(*C3);
1721 NewAndCst = C2.lshr(*C3);
1722 AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1723 } else if (ShiftOpcode == Instruction::LShr) {
1724 // For a logical right shift, we can fold if the comparison is not signed.
1725 // We can also fold a signed comparison if the shifted mask value and the
1726 // shifted comparison value are not negative. These constraints may not be
1727 // obvious, but we can prove that they are correct using an SMT solver.
1728 NewCmpCst = C1.shl(*C3);
1729 NewAndCst = C2.shl(*C3);
1730 AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1731 if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1732 return nullptr;
1733 } else {
1734 // For an arithmetic shift, check that both constants don't use (in a
1735 // signed sense) the top bits being shifted out.
1736 assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1737 NewCmpCst = C1.shl(*C3);
1738 NewAndCst = C2.shl(*C3);
1739 AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1740 if (NewAndCst.ashr(*C3) != C2)
1741 return nullptr;
1742 }
1743
1744 if (AnyCmpCstBitsShiftedOut) {
1745 // If we shifted bits out, the fold is not going to work out. As a
1746 // special case, check to see if this means that the result is always
1747 // true or false now.
1748 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1749 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1750 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1751 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1752 } else {
1753 Value *NewAnd = Builder.CreateAnd(
1754 Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1755 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1756 ConstantInt::get(And->getType(), NewCmpCst));
1757 }
1758 }
1759
1760 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1761 // preferable because it allows the C2 << Y expression to be hoisted out of a
1762 // loop if Y is invariant and X is not.
1763 if (Shift->hasOneUse() && C1.isZero() && Cmp.isEquality() &&
1764 !Shift->isArithmeticShift() &&
1765 ((!IsShl && C2.isOne()) || !isa<Constant>(Shift->getOperand(0)))) {
1766 // Compute C2 << Y.
1767 Value *NewShift =
1768 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1769 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1770
1771 // Compute X & (C2 << Y).
1772 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1773 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1774 }
1775
1776 return nullptr;
1777}
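// Illustrative IR for the bitfield fold above (a sketch; %x is a
// placeholder). Hoisting the shift out of the mask-and-compare:
//   %s = lshr i32 %x, 3
//   %a = and i32 %s, 7
//   %c = icmp eq i32 %a, 2
// becomes:
//   %a = and i32 %x, 56       ; 7 << 3
//   %c = icmp eq i32 %a, 16   ; 2 << 3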
1778
1779/// Fold icmp (and X, C2), C1.
1780Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1781 BinaryOperator *And,
1782 const APInt &C1) {
1783 bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1784
1785 // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1786 // TODO: We canonicalize to the longer form for scalars because we have
1787 // better analysis/folds for icmp, and codegen may be better with icmp.
1788 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isZero() &&
1789 match(And->getOperand(1), m_One()))
1790 return new TruncInst(And->getOperand(0), Cmp.getType());
1791
1792 const APInt *C2;
1793 Value *X;
1794 if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1795 return nullptr;
1796
1797 // (and X, highmask) s> [0, ~highmask] --> X s> ~highmask
1798 if (Cmp.getPredicate() == ICmpInst::ICMP_SGT && C1.ule(~*C2) &&
1799 C2->isNegatedPowerOf2())
1800 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1801 ConstantInt::get(X->getType(), ~*C2));
1802 // (and X, highmask) s< [1, -highmask] --> X s< -highmask
1803 if (Cmp.getPredicate() == ICmpInst::ICMP_SLT && !C1.isSignMask() &&
1804 (C1 - 1).ule(~*C2) && C2->isNegatedPowerOf2() && !C2->isSignMask())
1805 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1806 ConstantInt::get(X->getType(), -*C2));
1807
1808 // Don't perform the following transforms if the AND has multiple uses
1809 if (!And->hasOneUse())
1810 return nullptr;
1811
1812 if (Cmp.isEquality() && C1.isZero()) {
1813 // Restrict this fold to single-use 'and' (PR10267).
1814 // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1815 if (C2->isSignMask()) {
1816 Constant *Zero = Constant::getNullValue(X->getType());
1817 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1818 return new ICmpInst(NewPred, X, Zero);
1819 }
1820
1821 APInt NewC2 = *C2;
1822 KnownBits Know = computeKnownBits(And->getOperand(0), And);
1823 // Set high zeros of C2 to allow matching negated power-of-2.
1824 NewC2 = *C2 | APInt::getHighBitsSet(C2->getBitWidth(),
1825 Know.countMinLeadingZeros());
1826
1827 // Restrict this fold only for single-use 'and' (PR10267).
1828 // ((%x & C) == 0) --> %x u< (-C) iff (-C) is power of two.
1829 if (NewC2.isNegatedPowerOf2()) {
1830 Constant *NegBOC = ConstantInt::get(And->getType(), -NewC2);
1831 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1832 return new ICmpInst(NewPred, X, NegBOC);
1833 }
1834 }
1835
1836 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1837 // the input width without changing the value produced, eliminate the cast:
1838 //
1839 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1840 //
1841 // We can do this transformation if the constants do not have their sign bits
1842 // set or if it is an equality comparison. Extending a relational comparison
1843 // when we're checking the sign bit would not work.
1844 Value *W;
1845 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1846 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1847 // TODO: Is this a good transform for vectors? Wider types may reduce
1848 // throughput. Should this transform be limited (even for scalars) by using
1849 // shouldChangeType()?
1850 if (!Cmp.getType()->isVectorTy()) {
1851 Type *WideType = W->getType();
1852 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1853 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1854 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1855 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1856 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1857 }
1858 }
1859
1860 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1861 return I;
1862
1863 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1864 // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1865 //
1866 // iff pred isn't signed
1867 if (!Cmp.isSigned() && C1.isZero() && And->getOperand(0)->hasOneUse() &&
1868 match(And->getOperand(1), m_One())) {
1869 Constant *One = cast<Constant>(And->getOperand(1));
1870 Value *Or = And->getOperand(0);
1871 Value *A, *B, *LShr;
1872 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1873 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1874 unsigned UsesRemoved = 0;
1875 if (And->hasOneUse())
1876 ++UsesRemoved;
1877 if (Or->hasOneUse())
1878 ++UsesRemoved;
1879 if (LShr->hasOneUse())
1880 ++UsesRemoved;
1881
1882 // Compute A & ((1 << B) | 1)
1883 unsigned RequireUsesRemoved = match(B, m_ImmConstant()) ? 1 : 3;
1884 if (UsesRemoved >= RequireUsesRemoved) {
1885 Value *NewOr =
1886 Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1887 /*HasNUW=*/true),
1888 One, Or->getName());
1889 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1890 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1891 }
1892 }
1893 }
1894
1895 // (icmp eq (and (bitcast X to int), ExponentMask), ExponentMask) -->
1896 // llvm.is.fpclass(X, fcInf|fcNan)
1897 // (icmp ne (and (bitcast X to int), ExponentMask), ExponentMask) -->
1898 // llvm.is.fpclass(X, ~(fcInf|fcNan))
1899 // (icmp eq (and (bitcast X to int), ExponentMask), 0) -->
1900 // llvm.is.fpclass(X, fcSubnormal|fcZero)
1901 // (icmp ne (and (bitcast X to int), ExponentMask), 0) -->
1902 // llvm.is.fpclass(X, ~(fcSubnormal|fcZero))
1903 Value *V;
1904 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1905 Attribute::NoImplicitFloat) &&
1906 Cmp.isEquality() &&
1907 match(X, m_OneUse(m_ElementWiseBitCast(m_Value(V))))) {
1908 Type *FPType = V->getType()->getScalarType();
1909 if (FPType->isIEEELikeFPTy() && (C1.isZero() || C1 == *C2)) {
1910 APInt ExponentMask =
1911 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt();
1912 if (*C2 == ExponentMask) {
1913 unsigned Mask = C1.isZero()
1914 ? fcZero | fcSubnormal
1915 : fcNan | fcInf;
1916 if (isICMP_NE)
1917 Mask = ~Mask & fcAllFlags;
1918 return replaceInstUsesWith(Cmp, Builder.createIsFPClass(V, Mask));
1919 }
1920 }
1921 }
1922
1923 return nullptr;
1924}
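// Illustrative IR for the exponent-mask fold above (a sketch; %x is a
// placeholder). 2139095040 is 0x7F800000, the f32 exponent mask, and
// fcNan|fcInf is test-mask value 519:
//   %i = bitcast float %x to i32
//   %a = and i32 %i, 2139095040
//   %c = icmp eq i32 %a, 2139095040
// becomes:
//   %c = call i1 @llvm.is.fpclass.f32(float %x, i32 519)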
1925
1926/// Fold icmp (and X, Y), C.
1927Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1928 BinaryOperator *And,
1929 const APInt &C) {
1930 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1931 return I;
1932
1933 const ICmpInst::Predicate Pred = Cmp.getPredicate();
1934 bool TrueIfNeg;
1935 if (isSignBitCheck(Pred, C, TrueIfNeg)) {
1936 // ((X - 1) & ~X) < 0 --> X == 0
1937 // ((X - 1) & ~X) >= 0 --> X != 0
1938 Value *X;
1939 if (match(And->getOperand(0), m_Add(m_Value(X), m_AllOnes())) &&
1940 match(And->getOperand(1), m_Not(m_Specific(X)))) {
1941 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1942 return new ICmpInst(NewPred, X, ConstantInt::getNullValue(X->getType()));
1943 }
1944 // (X & -X) < 0 --> X == MinSignedC
1945 // (X & -X) > -1 --> X != MinSignedC
1946 if (match(And, m_c_And(m_Neg(m_Value(X)), m_Deferred(X)))) {
1947 Constant *MinSignedC = ConstantInt::get(
1948 X->getType(),
1949 APInt::getSignedMinValue(X->getType()->getScalarSizeInBits()));
1950 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1951 return new ICmpInst(NewPred, X, MinSignedC);
1952 }
1953 }
1954
1955 // TODO: These all require that Y is constant too, so refactor with the above.
1956
1957 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1958 Value *X = And->getOperand(0);
1959 Value *Y = And->getOperand(1);
1960 if (auto *C2 = dyn_cast<ConstantInt>(Y))
1961 if (auto *LI = dyn_cast<LoadInst>(X))
1962 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1963 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(LI, GEP, Cmp, C2))
1964 return Res;
1965
1966 if (!Cmp.isEquality())
1967 return nullptr;
1968
1969 // X & -C == -C -> X u> ~C
1970 // X & -C != -C -> X u<= ~C
1971 // iff C is a power of 2
1972 if (Cmp.getOperand(1) == Y && C.isNegatedPowerOf2()) {
1973 auto NewPred =
1974 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT : CmpInst::ICMP_ULE;
1975 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1976 }
1977
1978 // ((zext i1 X) & Y) == 0 --> !((trunc Y) & X)
1979 // ((zext i1 X) & Y) != 0 --> ((trunc Y) & X)
1980 // ((zext i1 X) & Y) == 1 --> ((trunc Y) & X)
1981 // ((zext i1 X) & Y) != 1 --> !((trunc Y) & X)
1982 if (match(And, m_OneUse(m_c_And(m_OneUse(m_ZExt(m_Value(X))), m_Value(Y)))) &&
1983 X->getType()->isIntOrIntVectorTy(1) && (C.isZero() || C.isOne())) {
1984 Value *TruncY = Builder.CreateTrunc(Y, X->getType());
1985 if (C.isZero() ^ (Pred == CmpInst::ICMP_NE)) {
1986 Value *And = Builder.CreateAnd(TruncY, X);
1987 return BinaryOperator::CreateNot(And);
1988 }
1989 return BinaryOperator::CreateAnd(TruncY, X);
1990 }
1991
1992 // (icmp eq/ne (and (shl -1, X), Y), 0)
1993 // -> (icmp eq/ne (lshr Y, X), 0)
1994 // We could technically handle any C == 0 or (C < 0 && isOdd(C)) but it seems
1995 // highly unlikely the non-zero case will ever show up in code.
1996 if (C.isZero() &&
1997 match(And, m_OneUse(m_c_And(m_OneUse(m_Shl(m_AllOnes(), m_Value(X))),
1998 m_Value(Y))))) {
1999 Value *LShr = Builder.CreateLShr(Y, X);
2000 return new ICmpInst(Pred, LShr, Constant::getNullValue(LShr->getType()));
2001 }
2002
2003 // (icmp eq/ne (and (add A, Addend), Msk), C)
2004 // -> (icmp eq/ne (and A, Msk), (and (sub C, Addend), Msk))
2005 {
2006 Value *A;
2007 const APInt *Addend, *Msk;
2008 if (match(And, m_And(m_OneUse(m_Add(m_Value(A), m_APInt(Addend))),
2009 m_LowBitMask(Msk))) &&
2010 C.ule(*Msk)) {
2011 APInt NewComperand = (C - *Addend) & *Msk;
2012 Value *MaskA = Builder.CreateAnd(A, ConstantInt::get(A->getType(), *Msk));
2013 return new ICmpInst(Pred, MaskA,
2014 ConstantInt::get(MaskA->getType(), NewComperand));
2015 }
2016 }
2017
2018 return nullptr;
2019}
2020
2021/// Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
2022static Value *foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or,
2023 InstCombiner::BuilderTy &Builder) {
2024 // Are we using xors or subs to bitwise check for a pair or pairs of
2025 // (in)equalities? Convert to a shorter form that has more potential to be
2026 // folded even further.
2027 // ((X1 ^/- X2) || (X3 ^/- X4)) == 0 --> (X1 == X2) && (X3 == X4)
2028 // ((X1 ^/- X2) || (X3 ^/- X4)) != 0 --> (X1 != X2) || (X3 != X4)
2029 // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) == 0 -->
2030 // (X1 == X2) && (X3 == X4) && (X5 == X6)
2031 // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) != 0 -->
2032 // (X1 != X2) || (X3 != X4) || (X5 != X6)
2033 SmallVector<std::pair<Value *, Value *>, 16> CmpValues;
2034 SmallVector<Value *, 16> WorkList(1, Or);
2035
2036 while (!WorkList.empty()) {
2037 auto MatchOrOperatorArgument = [&](Value *OrOperatorArgument) {
2038 Value *Lhs, *Rhs;
2039
2040 if (match(OrOperatorArgument,
2041 m_OneUse(m_Xor(m_Value(Lhs), m_Value(Rhs))))) {
2042 CmpValues.emplace_back(Lhs, Rhs);
2043 return;
2044 }
2045
2046 if (match(OrOperatorArgument,
2047 m_OneUse(m_Sub(m_Value(Lhs), m_Value(Rhs))))) {
2048 CmpValues.emplace_back(Lhs, Rhs);
2049 return;
2050 }
2051
2052 WorkList.push_back(OrOperatorArgument);
2053 };
2054
2055 Value *CurrentValue = WorkList.pop_back_val();
2056 Value *OrOperatorLhs, *OrOperatorRhs;
2057
2058 if (!match(CurrentValue,
2059 m_Or(m_Value(OrOperatorLhs), m_Value(OrOperatorRhs)))) {
2060 return nullptr;
2061 }
2062
2063 MatchOrOperatorArgument(OrOperatorRhs);
2064 MatchOrOperatorArgument(OrOperatorLhs);
2065 }
2066
2067 ICmpInst::Predicate Pred = Cmp.getPredicate();
2068 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2069 Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.rbegin()->first,
2070 CmpValues.rbegin()->second);
2071
2072 for (auto It = CmpValues.rbegin() + 1; It != CmpValues.rend(); ++It) {
2073 Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);
2074 LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
2075 }
2076
2077 return LhsCmp;
2078}
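// Illustrative IR for the chain fold above (a sketch; the operands are
// placeholders). A memcmp-style equality of two pairs:
//   %d1 = xor i32 %a, %b
//   %d2 = xor i32 %c, %d
//   %or = or i32 %d1, %d2
//   %r  = icmp eq i32 %or, 0
// becomes:
//   %e1 = icmp eq i32 %a, %b
//   %e2 = icmp eq i32 %c, %d
//   %r  = and i1 %e1, %e2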
2079
2080/// Fold icmp (or X, Y), C.
2081Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
2082 BinaryOperator *Or,
2083 const APInt &C) {
2084 ICmpInst::Predicate Pred = Cmp.getPredicate();
2085 if (C.isOne()) {
2086 // icmp slt signum(V) 1 --> icmp slt V, 1
2087 Value *V = nullptr;
2088 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
2089 return new ICmpInst(ICmpInst::ICMP_SLT, V,
2090 ConstantInt::get(V->getType(), 1));
2091 }
2092
2093 Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
2094
2095 // (icmp eq/ne (or disjoint x, C0), C1)
2096 // -> (icmp eq/ne x, C0^C1)
2097 if (Cmp.isEquality() && match(OrOp1, m_ImmConstant()) &&
2098 cast<PossiblyDisjointInst>(Or)->isDisjoint()) {
2099 Value *NewC =
2100 Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(), C));
2101 return new ICmpInst(Pred, OrOp0, NewC);
2102 }
2103
2104 const APInt *MaskC;
2105 if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
2106 if (*MaskC == C && (C + 1).isPowerOf2()) {
2107 // X | C == C --> X <=u C
2108 // X | C != C --> X >u C
2109 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
2110 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
2111 return new ICmpInst(Pred, OrOp0, OrOp1);
2112 }
2113
2114 // More general: canonicalize 'equality with set bits mask' to
2115 // 'equality with clear bits mask'.
2116 // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
2117 // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
2118 if (Or->hasOneUse()) {
2119 Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
2120 Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
2121 return new ICmpInst(Pred, And, NewC);
2122 }
2123 }
2124
2125 // (X | (X-1)) s< 0 --> X s< 1
2126 // (X | (X-1)) s> -1 --> X s> 0
2127 Value *X;
2128 bool TrueIfSigned;
2129 if (isSignBitCheck(Pred, C, TrueIfSigned) &&
2130 match(Or, m_c_Or(m_Add(m_Value(X), m_AllOnes()), m_Deferred(X)))) {
2131 auto NewPred = TrueIfSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGT;
2132 Constant *NewC = ConstantInt::get(X->getType(), TrueIfSigned ? 1 : 0);
2133 return new ICmpInst(NewPred, X, NewC);
2134 }
2135
2136 const APInt *OrC;
2137 // icmp(X | OrC, C) --> icmp(X, 0)
2138 if (C.isNonNegative() && match(Or, m_Or(m_Value(X), m_APInt(OrC)))) {
2139 switch (Pred) {
2140 // X | OrC s< C --> X s< 0 iff OrC s>= C s>= 0
2141 case ICmpInst::ICMP_SLT:
2142 // X | OrC s>= C --> X s>= 0 iff OrC s>= C s>= 0
2143 case ICmpInst::ICMP_SGE:
2144 if (OrC->sge(C))
2145 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2146 break;
2147 // X | OrC s<= C --> X s< 0 iff OrC s> C s>= 0
2148 case ICmpInst::ICMP_SLE:
2149 // X | OrC s> C --> X s>= 0 iff OrC s> C s>= 0
2150 case ICmpInst::ICMP_SGT:
2151 if (OrC->sgt(C))
2152 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), X,
2153 ConstantInt::getNullValue(X->getType()));
2154 break;
2155 default:
2156 break;
2157 }
2158 }
2159
2160 if (!Cmp.isEquality() || !C.isZero() || !Or->hasOneUse())
2161 return nullptr;
2162
2163 Value *P, *Q;
2164 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
2165 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
2166 // -> and (icmp eq P, null), (icmp eq Q, null).
2167 Value *CmpP =
2168 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
2169 Value *CmpQ =
2170 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
2171 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2172 return BinaryOperator::Create(BOpc, CmpP, CmpQ);
2173 }
2174
2175 if (Value *V = foldICmpOrXorSubChain(Cmp, Or, Builder))
2176 return replaceInstUsesWith(Cmp, V);
2177
2178 return nullptr;
2179}
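// Illustrative IR for the low-bitmask case above (a sketch; %x is a
// placeholder). With C == 7, C+1 is a power of 2, so:
//   %o = or i32 %x, 7
//   %c = icmp eq i32 %o, 7
// becomes:
//   %c = icmp ule i32 %x, 7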
2180
2181/// Fold icmp (mul X, Y), C.
2182Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
2183 BinaryOperator *Mul,
2184 const APInt &C) {
2185 ICmpInst::Predicate Pred = Cmp.getPredicate();
2186 Type *MulTy = Mul->getType();
2187 Value *X = Mul->getOperand(0);
2188
2189 // If there's no overflow:
2190 // X * X == 0 --> X == 0
2191 // X * X != 0 --> X != 0
2192 if (Cmp.isEquality() && C.isZero() && X == Mul->getOperand(1) &&
2193 (Mul->hasNoUnsignedWrap() || Mul->hasNoSignedWrap()))
2194 return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2195
2196 const APInt *MulC;
2197 if (!match(Mul->getOperand(1), m_APInt(MulC)))
2198 return nullptr;
2199
2200 // If this is a test of the sign bit and the multiply is sign-preserving with
2201 // a constant operand, use the multiply LHS operand instead:
2202 // (X * +MulC) < 0 --> X < 0
2203 // (X * -MulC) < 0 --> X > 0
2204 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
2205 if (MulC->isNegative())
2206 Pred = ICmpInst::getSwappedPredicate(Pred);
2207 return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2208 }
2209
2210 if (MulC->isZero())
2211 return nullptr;
2212
2213 // If the multiply does not wrap or the constant is odd, try to divide the
2214 // compare constant by the multiplication factor.
2215 if (Cmp.isEquality()) {
2216 // (mul nsw X, MulC) eq/ne C --> X eq/ne C /s MulC
2217 if (Mul->hasNoSignedWrap() && C.srem(*MulC).isZero()) {
2218 Constant *NewC = ConstantInt::get(MulTy, C.sdiv(*MulC));
2219 return new ICmpInst(Pred, X, NewC);
2220 }
2221
2222 // The C % MulC == 0 check is weaker than we could use if MulC is odd,
2223 // because it is correct to transform whenever MulC * N == C, including with
2224 // overflow. E.g., with i8, (icmp eq (mul X, 5), 101) -> (icmp eq X, 225),
2225 // but since 101 % 5 != 0, we miss that case.
2226 if (C.urem(*MulC).isZero()) {
2227 // (mul nuw X, MulC) eq/ne C --> X eq/ne C /u MulC
2228 // (mul X, OddC) eq/ne N * C --> X eq/ne N
2229 if ((*MulC & 1).isOne() || Mul->hasNoUnsignedWrap()) {
2230 Constant *NewC = ConstantInt::get(MulTy, C.udiv(*MulC));
2231 return new ICmpInst(Pred, X, NewC);
2232 }
2233 }
2234 }
2235
2236 // With a matching no-overflow guarantee, fold the constants:
2237 // (X * MulC) < C --> X < (C / MulC)
2238 // (X * MulC) > C --> X > (C / MulC)
2239 // TODO: Assert that Pred is not equal to SGE, SLE, UGE, ULE?
2240 Constant *NewC = nullptr;
2241 if (Mul->hasNoSignedWrap() && ICmpInst::isSigned(Pred)) {
2242 // MININT / -1 --> overflow.
2243 if (C.isMinSignedValue() && MulC->isAllOnes())
2244 return nullptr;
2245 if (MulC->isNegative())
2246 Pred = ICmpInst::getSwappedPredicate(Pred);
2247
2248 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2249 NewC = ConstantInt::get(
2250 MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::UP));
2251 } else {
2252 assert((Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_SGT) &&
2253 "Unexpected predicate");
2254 NewC = ConstantInt::get(
2255 MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::DOWN));
2256 }
2257 } else if (Mul->hasNoUnsignedWrap() && ICmpInst::isUnsigned(Pred)) {
2258 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) {
2259 NewC = ConstantInt::get(
2260 MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::UP));
2261 } else {
2262 assert((Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
2263 "Unexpected predicate");
2264 NewC = ConstantInt::get(
2265 MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::DOWN));
2266 }
2267 }
2268
2269 return NewC ? new ICmpInst(Pred, X, NewC) : nullptr;
2270}
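// Illustrative IR for the no-overflow divide-through above (a sketch; %x is
// a placeholder). RoundingSDiv(17, 5, UP) == 4, so:
//   %m = mul nsw i32 %x, 5
//   %c = icmp slt i32 %m, 17
// becomes:
//   %c = icmp slt i32 %x, 4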
2271
2272/// Fold icmp (shl nuw C2, Y), C.
2273static Instruction *foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl,
2274 const APInt &C) {
2275 Value *Y;
2276 const APInt *C2;
2277 if (!match(Shl, m_NUWShl(m_APInt(C2), m_Value(Y))))
2278 return nullptr;
2279
2280 Type *ShiftType = Shl->getType();
2281 unsigned TypeBits = C.getBitWidth();
2282 ICmpInst::Predicate Pred = Cmp.getPredicate();
2283 if (Cmp.isUnsigned()) {
2284 if (C2->isZero() || C2->ugt(C))
2285 return nullptr;
2286 APInt Div, Rem;
2287 APInt::udivrem(C, *C2, Div, Rem);
2288 bool CIsPowerOf2 = Rem.isZero() && Div.isPowerOf2();
2289
2290 // (1 << Y) pred C -> Y pred Log2(C)
2291 if (!CIsPowerOf2) {
2292 // (1 << Y) < 30 -> Y <= 4
2293 // (1 << Y) <= 30 -> Y <= 4
2294 // (1 << Y) >= 30 -> Y > 4
2295 // (1 << Y) > 30 -> Y > 4
2296 if (Pred == ICmpInst::ICMP_ULT)
2297 Pred = ICmpInst::ICMP_ULE;
2298 else if (Pred == ICmpInst::ICMP_UGE)
2299 Pred = ICmpInst::ICMP_UGT;
2300 }
2301
2302 unsigned CLog2 = Div.logBase2();
2303 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2304 } else if (Cmp.isSigned() && C2->isOne()) {
2305 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2306 // (1 << Y) > 0 -> Y != 31
2307 // (1 << Y) > C -> Y != 31 if C is negative.
2308 if (Pred == ICmpInst::ICMP_SGT && C.sle(0))
2309 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2310
2311 // (1 << Y) < 0 -> Y == 31
2312 // (1 << Y) < 1 -> Y == 31
2313 // (1 << Y) < C -> Y == 31 if C is negative and not signed min.
2314 // Exclude signed min by subtracting 1, which also lowers the upper bound to 0.
2315 if (Pred == ICmpInst::ICMP_SLT && (C - 1).sle(0))
2316 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2317 }
2318
2319 return nullptr;
2320}
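// Illustrative IR (a sketch; %y is a placeholder). With C2 == 1 and a
// non-power-of-2 bound, the compare reduces to a test of the shift amount:
//   %s = shl nuw i32 1, %y
//   %c = icmp ult i32 %s, 30
// becomes:
//   %c = icmp ule i32 %y, 4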
2321
2322/// Fold icmp (shl X, Y), C.
2323Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2324 BinaryOperator *Shl,
2325 const APInt &C) {
2326 const APInt *ShiftVal;
2327 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2328 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2329
2330 ICmpInst::Predicate Pred = Cmp.getPredicate();
2331 // (icmp pred (shl nuw&nsw X, Y), Csle0)
2332 // -> (icmp pred X, Csle0)
2333 //
2334 // The idea is that nuw/nsw essentially freeze the sign bit for the shift op,
2335 // so X's sign bit must be the one being tested.
2336 if (C.sle(0) && Shl->hasNoUnsignedWrap() && Shl->hasNoSignedWrap())
2337 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2338
2339 // (icmp eq/ne (shl nuw|nsw X, Y), 0)
2340 // -> (icmp eq/ne X, 0)
2341 if (ICmpInst::isEquality(Pred) && C.isZero() &&
2342 (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap()))
2343 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2344
2345 // (icmp slt (shl nsw X, Y), 0/1)
2346 // -> (icmp slt X, 0/1)
2347 // (icmp sgt (shl nsw X, Y), 0/-1)
2348 // -> (icmp sgt X, 0/-1)
2349 //
2350 // NB: sge/sle with a constant will canonicalize to sgt/slt.
2351 if (Shl->hasNoSignedWrap() &&
2352 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT))
2353 if (C.isZero() || (Pred == ICmpInst::ICMP_SGT ? C.isAllOnes() : C.isOne()))
2354 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2355
2356 const APInt *ShiftAmt;
2357 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2358 return foldICmpShlLHSC(Cmp, Shl, C);
2359
2360 // Check that the shift amount is in range. If not, don't perform undefined
2361 // shifts. When the shift is visited, it will be simplified.
2362 unsigned TypeBits = C.getBitWidth();
2363 if (ShiftAmt->uge(TypeBits))
2364 return nullptr;
2365
2366 Value *X = Shl->getOperand(0);
2367 Type *ShType = Shl->getType();
2368
2369 // NSW guarantees that we are only shifting out sign bits from the high bits,
2370 // so we can ASHR the compare constant without needing a mask and eliminate
2371 // the shift.
2372 if (Shl->hasNoSignedWrap()) {
2373 if (Pred == ICmpInst::ICMP_SGT) {
2374 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2375 APInt ShiftedC = C.ashr(*ShiftAmt);
2376 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2377 }
2378 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2379 C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2380 APInt ShiftedC = C.ashr(*ShiftAmt);
2381 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2382 }
2383 if (Pred == ICmpInst::ICMP_SLT) {
2384 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2385 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2386 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2387 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2388 assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2389 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2390 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2391 }
2392 }
2393
2394 // NUW guarantees that we are only shifting out zero bits from the high bits,
2395 // so we can LSHR the compare constant without needing a mask and eliminate
2396 // the shift.
2397 if (Shl->hasNoUnsignedWrap()) {
2398 if (Pred == ICmpInst::ICMP_UGT) {
2399 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2400 APInt ShiftedC = C.lshr(*ShiftAmt);
2401 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2402 }
2403 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2404 C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2405 APInt ShiftedC = C.lshr(*ShiftAmt);
2406 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2407 }
2408 if (Pred == ICmpInst::ICMP_ULT) {
2409 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2410 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2411 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2412 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2413 assert(C.ugt(0) && "ult 0 should have been eliminated");
2414 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2415 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2416 }
2417 }
2418
2419 if (Cmp.isEquality() && Shl->hasOneUse()) {
2420 // Strength-reduce the shift into an 'and'.
2421 Constant *Mask = ConstantInt::get(
2422 ShType,
2423 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2424 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2425 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2426 return new ICmpInst(Pred, And, LShrC);
2427 }
2428
2429 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2430 bool TrueIfSigned = false;
2431 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2432 // (X << 31) <s 0 --> (X & 1) != 0
2433 Constant *Mask = ConstantInt::get(
2434 ShType,
2435 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2436 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2437 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2438 And, Constant::getNullValue(ShType));
2439 }
2440
2441 // Simplify 'shl' inequality test into 'and' equality test.
2442 if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2443 // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2444 if ((C + 1).isPowerOf2() &&
2445 (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2446 Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2447 return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2448 : ICmpInst::ICMP_NE,
2449 And, Constant::getNullValue(ShType));
2450 }
2451 // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2452 if (C.isPowerOf2() &&
2453 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2454 Value *And =
2455 Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2456 return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2457 : ICmpInst::ICMP_NE,
2458 And, Constant::getNullValue(ShType));
2459 }
2460 }
2461
2462 // Transform (icmp pred iM (shl iM %v, N), C)
2463 // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N)))
2464 // Transform the shl to a trunc if (trunc (C>>N)) loses no bits and i(M-N) is a desirable type.
2465 // This enables us to get rid of the shift in favor of a trunc that may be
2466 // free on the target. It has the additional benefit of comparing to a
2467 // smaller constant that may be more target-friendly.
2468 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2469 if (Shl->hasOneUse() && Amt != 0 &&
2470 shouldChangeType(ShType->getScalarSizeInBits(), TypeBits - Amt)) {
2471 ICmpInst::Predicate CmpPred = Pred;
2472 APInt RHSC = C;
2473
2474 if (RHSC.countr_zero() < Amt && ICmpInst::isStrictPredicate(CmpPred)) {
2475 // Try the flipped strictness predicate.
2476 // e.g.:
2477 // icmp ult i64 (shl X, 32), 8589934593 ->
2478 // icmp ule i64 (shl X, 32), 8589934592 ->
2479 // icmp ule i32 (trunc X, i32), 2 ->
2480 // icmp ult i32 (trunc X, i32), 3
2481 if (auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(
2482 Pred, ConstantInt::get(ShType->getContext(), C))) {
2483 CmpPred = FlippedStrictness->first;
2484 RHSC = cast<ConstantInt>(FlippedStrictness->second)->getValue();
2485 }
2486 }
2487
2488 if (RHSC.countr_zero() >= Amt) {
2489 Type *TruncTy = ShType->getWithNewBitWidth(TypeBits - Amt);
2490 Constant *NewC =
2491 ConstantInt::get(TruncTy, RHSC.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2492 return new ICmpInst(CmpPred,
2493 Builder.CreateTrunc(X, TruncTy, "", /*IsNUW=*/false,
2494 Shl->hasNoSignedWrap()),
2495 NewC);
2496 }
2497 }
2498
2499 return nullptr;
2500}
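// Illustrative IR for the nuw case above (a sketch; %x is a placeholder):
//   %s = shl nuw i32 %x, 2
//   %c = icmp ugt i32 %s, 20
// becomes:
//   %c = icmp ugt i32 %x, 5    ; 20 >>u 2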
2501
2502/// Fold icmp ({al}shr X, Y), C.
2503Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2504 BinaryOperator *Shr,
2505 const APInt &C) {
2506 // An exact shr only shifts out zero bits, so:
2507 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2508 Value *X = Shr->getOperand(0);
2509 CmpInst::Predicate Pred = Cmp.getPredicate();
2510 if (Cmp.isEquality() && Shr->isExact() && C.isZero())
2511 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2512
2513 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2514 const APInt *ShiftValC;
2515 if (match(X, m_APInt(ShiftValC))) {
2516 if (Cmp.isEquality())
2517 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftValC);
2518
2519 // (ShiftValC >> Y) >s -1 --> Y != 0 with ShiftValC < 0
2520 // (ShiftValC >> Y) <s 0 --> Y == 0 with ShiftValC < 0
2521 bool TrueIfSigned;
2522 if (!IsAShr && ShiftValC->isNegative() &&
2523 isSignBitCheck(Pred, C, TrueIfSigned))
2524 return new ICmpInst(TrueIfSigned ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE,
2525 Shr->getOperand(1),
2526 ConstantInt::getNullValue(X->getType()));
2527
2528 // If the shifted constant is a power-of-2, test the shift amount directly:
2529 // (ShiftValC >> Y) >u C --> X <u (LZ(C) - LZ(ShiftValC))
2530 // (ShiftValC >> Y) <u C --> X >=u (LZ(C-1) - LZ(ShiftValC))
2531 if (!IsAShr && ShiftValC->isPowerOf2() &&
2532 (Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_ULT)) {
2533 bool IsUGT = Pred == CmpInst::ICMP_UGT;
2534 assert(ShiftValC->uge(C) && "Expected simplify of compare");
2535 assert((IsUGT || !C.isZero()) && "Expected X u< 0 to simplify");
2536
2537 unsigned CmpLZ = IsUGT ? C.countl_zero() : (C - 1).countl_zero();
2538 unsigned ShiftLZ = ShiftValC->countl_zero();
2539 Constant *NewC = ConstantInt::get(Shr->getType(), CmpLZ - ShiftLZ);
2540 auto NewPred = IsUGT ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
2541 return new ICmpInst(NewPred, Shr->getOperand(1), NewC);
2542 }
2543 }
2544
2545 const APInt *ShiftAmtC;
2546 if (!match(Shr->getOperand(1), m_APInt(ShiftAmtC)))
2547 return nullptr;
2548
2549 // Check that the shift amount is in range. If not, don't perform undefined
2550 // shifts. When the shift is visited it will be simplified.
2551 unsigned TypeBits = C.getBitWidth();
2552 unsigned ShAmtVal = ShiftAmtC->getLimitedValue(TypeBits);
2553 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2554 return nullptr;
2555
2556 bool IsExact = Shr->isExact();
2557 Type *ShrTy = Shr->getType();
2558 // TODO: If we could guarantee that InstSimplify would handle all of the
2559 // constant-value-based preconditions in the folds below, then we could assert
2560 // those conditions rather than checking them. This is difficult because of
2561 // undef/poison (PR34838).
2562 if (IsAShr && Shr->hasOneUse()) {
2563 if (IsExact && (Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) &&
2564 (C - 1).isPowerOf2() && C.countLeadingZeros() > ShAmtVal) {
2565 // When C - 1 is a power of two and the transform can be legally
2566 // performed, prefer this form so the produced constant is close to a
2567 // power of two.
2568 // icmp slt/ult (ashr exact X, ShAmtC), C
2569 // --> icmp slt/ult X, (C - 1) << ShAmtC) + 1
2570 APInt ShiftedC = (C - 1).shl(ShAmtVal) + 1;
2571 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2572 }
2573 if (IsExact || Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) {
2574 // When ShAmtC can be shifted losslessly:
2575 // icmp PRED (ashr exact X, ShAmtC), C --> icmp PRED X, (C << ShAmtC)
2576 // icmp slt/ult (ashr X, ShAmtC), C --> icmp slt/ult X, (C << ShAmtC)
2577 APInt ShiftedC = C.shl(ShAmtVal);
2578 if (ShiftedC.ashr(ShAmtVal) == C)
2579 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2580 }
2581 if (Pred == CmpInst::ICMP_SGT) {
2582 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2583 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2584 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2585 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2586 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2587 }
2588 if (Pred == CmpInst::ICMP_UGT) {
2589 // icmp ugt (ashr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2590 // 'C + 1 << ShAmtC' can overflow as a signed number, so the 2nd
2591 // clause accounts for that pattern.
2592 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2593 if ((ShiftedC + 1).ashr(ShAmtVal) == (C + 1) ||
2594 (C + 1).shl(ShAmtVal).isMinSignedValue())
2595 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2596 }
2597
2598 // If the compare constant has significant bits above the lowest sign-bit,
2599 // then convert an unsigned cmp to a test of the sign-bit:
2600 // (ashr X, ShiftC) u> C --> X s< 0
2601 // (ashr X, ShiftC) u< C --> X s> -1
2602 if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2603 if (Pred == CmpInst::ICMP_UGT) {
2604 return new ICmpInst(CmpInst::ICMP_SLT, X,
2605 ConstantInt::getNullValue(ShrTy));
2606 }
2607 if (Pred == CmpInst::ICMP_ULT) {
2608 return new ICmpInst(CmpInst::ICMP_SGT, X,
2609 ConstantInt::getAllOnesValue(ShrTy));
2610 }
2611 }
2612 } else if (!IsAShr) {
2613 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2614 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2615 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2616 APInt ShiftedC = C.shl(ShAmtVal);
2617 if (ShiftedC.lshr(ShAmtVal) == C)
2618 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2619 }
2620 if (Pred == CmpInst::ICMP_UGT) {
2621 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2622 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2623 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2624 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2625 }
2626 }
2627
2628 if (!Cmp.isEquality())
2629 return nullptr;
2630
2631 // Handle equality comparisons of shift-by-constant.
2632
2633 // If the comparison constant changes with the shift, the comparison cannot
2634 // succeed (bits of the comparison constant cannot match the shifted value).
2635 // This should be known by InstSimplify and already be folded to true/false.
2636 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2637 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2638 "Expected icmp+shr simplify did not occur.");
2639
2640 // If the bits shifted out are known zero, compare the unshifted value:
2641 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
2642 if (Shr->isExact())
2643 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2644
2645 if (Shr->hasOneUse()) {
2646 // Canonicalize the shift into an 'and':
2647 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2648 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2649 Constant *Mask = ConstantInt::get(ShrTy, Val);
2650 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2651 return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2652 }
2653
2654 return nullptr;
2655}
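// Illustrative IR for the lshr case above (a sketch; %x is a placeholder).
// 5 << 2 round-trips through the shift without losing bits, so:
//   %s = lshr i32 %x, 2
//   %c = icmp ult i32 %s, 5
// becomes:
//   %c = icmp ult i32 %x, 20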
2656
2657Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2658 BinaryOperator *SRem,
2659 const APInt &C) {
2660 const ICmpInst::Predicate Pred = Cmp.getPredicate();
2661 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT) {
2662 // Canonicalize unsigned predicates to signed:
2663 // (X s% DivisorC) u> C -> (X s% DivisorC) s< 0
2664 // iff (C s< 0 ? ~C : C) u>= abs(DivisorC)-1
2665 // (X s% DivisorC) u< C+1 -> (X s% DivisorC) s> -1
2666 // iff (C+1 s< 0 ? ~C : C) u>= abs(DivisorC)-1
2667
2668 const APInt *DivisorC;
2669 if (!match(SRem->getOperand(1), m_APInt(DivisorC)))
2670 return nullptr;
2671 if (DivisorC->isZero())
2672 return nullptr;
2673
2674 APInt NormalizedC = C;
2675 if (Pred == ICmpInst::ICMP_ULT) {
2676 assert(!NormalizedC.isZero() &&
2677 "ult X, 0 should have been simplified already.");
2678 --NormalizedC;
2679 }
2680 if (C.isNegative())
2681 NormalizedC.flipAllBits();
2682 if (!NormalizedC.uge(DivisorC->abs() - 1))
2683 return nullptr;
2684
2685 Type *Ty = SRem->getType();
2686 if (Pred == ICmpInst::ICMP_UGT)
2687 return new ICmpInst(ICmpInst::ICMP_SLT, SRem,
2688 ConstantInt::getNullValue(Ty));
2689 return new ICmpInst(ICmpInst::ICMP_SGT, SRem,
2690 ConstantInt::getAllOnesValue(Ty));
2691 }
2692 // Match an 'is positive' or 'is negative' comparison of remainder by a
2693 // constant power-of-2 value:
2694 // (X % pow2C) sgt/slt 0
2695 if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT &&
2696 Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2697 return nullptr;
2698
2699 // TODO: The one-use check is standard because we do not typically want to
2700 // create longer instruction sequences, but this might be a special-case
2701 // because srem is not good for analysis or codegen.
2702 if (!SRem->hasOneUse())
2703 return nullptr;
2704
2705 const APInt *DivisorC;
2706 if (!match(SRem->getOperand(1), m_Power2(DivisorC)))
2707 return nullptr;
2708
2709 // For cmp_sgt/cmp_slt only zero valued C is handled.
2710 // For cmp_eq/cmp_ne only positive valued C is handled.
2711 if (((Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT) &&
2712 !C.isZero()) ||
2713 ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2714 !C.isStrictlyPositive()))
2715 return nullptr;
2716
2717 // Mask off the sign bit and the modulo bits (low-bits).
2718 Type *Ty = SRem->getType();
2719 APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2720 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2721 Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2722
2723 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)
2724 return new ICmpInst(Pred, And, ConstantInt::get(Ty, C));
2725
2726 // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2727 // bit is set. Example:
2728 // (i8 X % 32) s> 0 --> (X & 159) s> 0
2729 if (Pred == ICmpInst::ICMP_SGT)
2730 return new ICmpInst(ICmpInst::ICMP_SGT, And,
2731 ConstantInt::getNullValue(Ty));
2732 // For 'is negative?' check that the sign-bit is set and at least 1 masked
2733 // bit is set. Example:
2734 // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2735 return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2736}
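// Illustrative IR for the 'is positive' case above (a sketch; %x is a
// placeholder). The mask is sign bit | (32 - 1) = 0x9F, i.e. -97 as i8:
//   %r = srem i8 %x, 32
//   %c = icmp sgt i8 %r, 0
// becomes:
//   %a = and i8 %x, -97
//   %c = icmp sgt i8 %a, 0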
2737
2738/// Fold icmp (udiv X, Y), C.
2739Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2740 BinaryOperator *UDiv,
2741 const APInt &C) {
2742 ICmpInst::Predicate Pred = Cmp.getPredicate();
2743 Value *X = UDiv->getOperand(0);
2744 Value *Y = UDiv->getOperand(1);
2745 Type *Ty = UDiv->getType();
2746
2747 const APInt *C2;
2748 if (!match(X, m_APInt(C2)))
2749 return nullptr;
2750
2751 assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2752
2753 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
2754 if (Pred == ICmpInst::ICMP_UGT) {
2755 assert(!C.isMaxValue() &&
2756 "icmp ugt X, UINT_MAX should have been simplified already.");
2757 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2758 ConstantInt::get(Ty, C2->udiv(C + 1)));
2759 }
2760
2761 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2762 if (Pred == ICmpInst::ICMP_ULT) {
2763 assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2764 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2765 ConstantInt::get(Ty, C2->udiv(C)));
2766 }
2767
2768 return nullptr;
2769}
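// Illustrative IR for the ugt case above (a sketch; %y is a placeholder).
// 64 /u %y >u 7 holds exactly when %y <=u 64/(7+1):
//   %d = udiv i32 64, %y
//   %c = icmp ugt i32 %d, 7
// becomes:
//   %c = icmp ule i32 %y, 8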
2770
2771/// Fold icmp ({su}div X, Y), C.
2772Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2773 BinaryOperator *Div,
2774 const APInt &C) {
2775 ICmpInst::Predicate Pred = Cmp.getPredicate();
2776 Value *X = Div->getOperand(0);
2777 Value *Y = Div->getOperand(1);
2778 Type *Ty = Div->getType();
2779 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2780
2781 // If unsigned division and the compare constant is bigger than
2782 // UMAX/2 (negative), there's only one pair of values that satisfies an
2783 // equality check, so eliminate the division:
2784 // (X u/ Y) == C --> (X == C) && (Y == 1)
2785 // (X u/ Y) != C --> (X != C) || (Y != 1)
2786 // Similarly, if signed division and the compare constant is exactly SMIN:
2787 // (X s/ Y) == SMIN --> (X == SMIN) && (Y == 1)
2788 // (X s/ Y) != SMIN --> (X != SMIN) || (Y != 1)
2789 if (Cmp.isEquality() && Div->hasOneUse() && C.isSignBitSet() &&
2790 (!DivIsSigned || C.isMinSignedValue())) {
2791 Value *XBig = Builder.CreateICmp(Pred, X, ConstantInt::get(Ty, C));
2792 Value *YOne = Builder.CreateICmp(Pred, Y, ConstantInt::get(Ty, 1));
2793 auto Logic = Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2794 return BinaryOperator::Create(Logic, XBig, YOne);
2795 }
2796
2797 // Fold: icmp pred ([us]div X, C2), C -> range test
2798 // Fold this div into the comparison, producing a range check.
2799 // Determine, based on the divide type, what range is being
2800 // checked. If there is an overflow on the low or high side, remember
2801 // it; otherwise compute the range [low, hi) bounding the new value.
2802 // See: InsertRangeTest above for the kinds of replacements possible.
2803 const APInt *C2;
2804 if (!match(Y, m_APInt(C2)))
2805 return nullptr;
2806
2807 // FIXME: If the operand types don't match the type of the divide
2808 // then don't attempt this transform. The code below doesn't have the
2809 // logic to deal with a signed divide and an unsigned compare (and
2810 // vice versa). This is because (x /s C2) <s C produces different
2811 // results than (x /s C2) <u C or (x /u C2) <s C or even
2812 // (x /u C2) <u C. Simply casting the operands and result won't
2813 // work. :( The if statement below tests that condition and bails
2814 // if it finds it.
2815 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2816 return nullptr;
2817
2818 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2819 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2820 // division-by-constant cases should be present, we can not assert that they
2821 // have happened before we reach this icmp instruction.
2822 if (C2->isZero() || C2->isOne() || (DivIsSigned && C2->isAllOnes()))
2823 return nullptr;
2824
2825 // Compute Prod = C * C2. We are essentially solving an equation of
2826 // form X / C2 = C. We solve for X by multiplying C2 and C.
2827 // By solving for X, we can turn this into a range check instead of computing
2828 // a divide.
2829 APInt Prod = C * *C2;
2830
2831 // Determine if the product overflows by seeing if the product is not equal to
2832 // the divide. Make sure we do the same kind of divide as in the LHS
2833 // instruction that we're folding.
2834 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2835
2836 // If the division is known to be exact, then there is no remainder from the
2837 // divide, so the covered range size is 1; otherwise it is the divisor.
2838 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2839
2840 // Figure out the interval that is being checked. For example, a comparison
2841 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2842 // Compute this interval based on the constants involved and the signedness of
2843 // the compare/divide. This computes a half-open interval, keeping track of
2844 // whether either value in the interval overflows. After analysis, each
2845 // overflow variable is set to 0 if its corresponding bound variable is valid,
2846 // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
2847 int LoOverflow = 0, HiOverflow = 0;
2848 APInt LoBound, HiBound;
2849
2850 if (!DivIsSigned) { // udiv
2851 // e.g. X/5 op 3 --> [15, 20)
2852 LoBound = Prod;
2853 HiOverflow = LoOverflow = ProdOV;
2854 if (!HiOverflow) {
2855 // If this is not an exact divide, then many values in the range collapse
2856 // to the same result value.
2857 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2858 }
2859 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2860 if (C.isZero()) { // (X / pos) op 0
2861 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2862 LoBound = -(RangeSize - 1);
2863 HiBound = RangeSize;
2864 } else if (C.isStrictlyPositive()) { // (X / pos) op pos
2865 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2866 HiOverflow = LoOverflow = ProdOV;
2867 if (!HiOverflow)
2868 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2869 } else { // (X / pos) op neg
2870 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
2871 HiBound = Prod + 1;
2872 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2873 if (!LoOverflow) {
2874 APInt DivNeg = -RangeSize;
2875 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2876 }
2877 }
2878 } else if (C2->isNegative()) { // Divisor is < 0.
2879 if (Div->isExact())
2880 RangeSize.negate();
2881 if (C.isZero()) { // (X / neg) op 0
2882 // e.g. X/-5 op 0 --> [-4, 5)
2883 LoBound = RangeSize + 1;
2884 HiBound = -RangeSize;
2885 if (HiBound == *C2) { // -INTMIN = INTMIN
2886 HiOverflow = 1; // [INTMIN+1, overflow)
2887 HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN
2888 }
2889 } else if (C.isStrictlyPositive()) { // (X / neg) op pos
2890 // e.g. X/-5 op 3 --> [-19, -14)
2891 HiBound = Prod + 1;
2892 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2893 if (!LoOverflow)
2894 LoOverflow =
2895 addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2896 } else { // (X / neg) op neg
2897 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
2898 LoOverflow = HiOverflow = ProdOV;
2899 if (!HiOverflow)
2900 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2901 }
2902
2903 // Dividing by a negative swaps the condition. LT <-> GT
2904 Pred = ICmpInst::getSwappedPredicate(Pred);
2905 }
2906
2907 switch (Pred) {
2908 default:
2909 llvm_unreachable("Unhandled icmp predicate!");
2910 case ICmpInst::ICMP_EQ:
2911 if (LoOverflow && HiOverflow)
2912 return replaceInstUsesWith(Cmp, Builder.getFalse());
2913 if (HiOverflow)
2914 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2915 X, ConstantInt::get(Ty, LoBound));
2916 if (LoOverflow)
2917 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2918 X, ConstantInt::get(Ty, HiBound));
2919 return replaceInstUsesWith(
2920 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2921 case ICmpInst::ICMP_NE:
2922 if (LoOverflow && HiOverflow)
2923 return replaceInstUsesWith(Cmp, Builder.getTrue());
2924 if (HiOverflow)
2925 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2926 X, ConstantInt::get(Ty, LoBound));
2927 if (LoOverflow)
2928 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2929 X, ConstantInt::get(Ty, HiBound));
2930 return replaceInstUsesWith(
2931 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, false));
2932 case ICmpInst::ICMP_ULT:
2933 case ICmpInst::ICMP_SLT:
2934 if (LoOverflow == +1) // Low bound is greater than input range.
2935 return replaceInstUsesWith(Cmp, Builder.getTrue());
2936 if (LoOverflow == -1) // Low bound is less than input range.
2937 return replaceInstUsesWith(Cmp, Builder.getFalse());
2938 return new ICmpInst(Pred, X, ConstantInt::get(Ty, LoBound));
2939 case ICmpInst::ICMP_UGT:
2940 case ICmpInst::ICMP_SGT:
2941 if (HiOverflow == +1) // High bound greater than input range.
2942 return replaceInstUsesWith(Cmp, Builder.getFalse());
2943 if (HiOverflow == -1) // High bound less than input range.
2944 return replaceInstUsesWith(Cmp, Builder.getTrue());
2945 if (Pred == ICmpInst::ICMP_UGT)
2946 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, HiBound));
2947 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, HiBound));
2948 }
2949
2950 return nullptr;
2951}
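// Illustrative IR for the equality range check above (a sketch; %x is a
// placeholder). X u/ 5 == 3 holds exactly for X in [15, 20), which
// insertRangeTest emits as a subtract-and-compare:
//   %d = udiv i32 %x, 5
//   %c = icmp eq i32 %d, 3
// becomes (roughly):
//   %t = add i32 %x, -15
//   %c = icmp ult i32 %t, 5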
2952
2953/// Fold icmp (sub X, Y), C.
2954Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2955 BinaryOperator *Sub,
2956 const APInt &C) {
2957 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2958 ICmpInst::Predicate Pred = Cmp.getPredicate();
2959 Type *Ty = Sub->getType();
2960
2961 // (SubC - Y) == C) --> Y == (SubC - C)
2962 // (SubC - Y) != C) --> Y != (SubC - C)
2963 Constant *SubC;
2964 if (Cmp.isEquality() && match(X, m_ImmConstant(SubC))) {
2965 return new ICmpInst(Pred, Y,
2966 ConstantExpr::getSub(SubC, ConstantInt::get(Ty, C)));
2967 }
2968
2969 // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2970 const APInt *C2;
2971 APInt SubResult;
2972 ICmpInst::Predicate SwappedPred = Cmp.getSwappedPredicate();
2973 bool HasNSW = Sub->hasNoSignedWrap();
2974 bool HasNUW = Sub->hasNoUnsignedWrap();
2975 if (match(X, m_APInt(C2)) &&
2976 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2977 !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2978 return new ICmpInst(SwappedPred, Y, ConstantInt::get(Ty, SubResult));
2979
2980 // X - Y == 0 --> X == Y.
2981 // X - Y != 0 --> X != Y.
2982 // TODO: We allow this with multiple uses as long as the other uses are not
2983 // in phis. The phi use check is guarding against a codegen regression
2984 // for a loop test. If the backend could undo this (and possibly
2985 // subsequent transforms), we would not need this hack.
2986 if (Cmp.isEquality() && C.isZero() &&
2987 none_of((Sub->users()), [](const User *U) { return isa<PHINode>(U); }))
2988 return new ICmpInst(Pred, X, Y);
2989
2990 // The following transforms are only worth it if the only user of the subtract
2991 // is the icmp.
2992 // TODO: This is an artificial restriction for all of the transforms below
2993 // that only need a single replacement icmp. Can these use the phi test
2994 // like the transform above here?
2995 if (!Sub->hasOneUse())
2996 return nullptr;
2997
2998 if (Sub->hasNoSignedWrap()) {
2999 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
3000 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
3001 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3002
3003 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
3004 if (Pred == ICmpInst::ICMP_SGT && C.isZero())
3005 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3006
3007 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
3008 if (Pred == ICmpInst::ICMP_SLT && C.isZero())
3009 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3010
3011 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
3012 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
3013 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3014 }
3015
3016 if (!match(X, m_APInt(C2)))
3017 return nullptr;
3018
3019 // C2 - Y <u C -> (Y | (C - 1)) == C2
3020 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
3021 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
3022 (*C2 & (C - 1)) == (C - 1))
3023 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
3024
3025 // C2 - Y >u C -> (Y | C) != C2
3026 // iff C2 & C == C and C + 1 is a power of 2
3027 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
3028 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
3029
3030 // We have handled the special cases that reduce to a simpler compare.
3031 // Canonicalize any remaining sub to add as:
3032 // (C2 - Y) > C --> (Y + ~C2) < ~C
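// Illustrative example (i8): (10 - Y) >u 3 --> (Y + ~10) <u ~3, i.e.
// (Y + 245) <u 252, using the swapped predicate.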
3033 Value *Add = Builder.CreateAdd(Y, ConstantInt::get(Ty, ~(*C2)), "notsub",
3034 HasNUW, HasNSW);
3035 return new ICmpInst(SwappedPred, Add, ConstantInt::get(Ty, ~C));
3036}
3037
3038static Value *createLogicFromTable(const std::bitset<4> &Table, Value *Op0,
3039 Value *Op1, IRBuilderBase &Builder,
3040 bool HasOneUse) {
3041 auto FoldConstant = [&](bool Val) {
3042 Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
3043 if (Op0->getType()->isVectorTy())
3044 Res = ConstantVector::getSplat(
3045 cast<VectorType>(Op0->getType())->getElementCount(), Res);
3046 return Res;
3047 };
3048
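// Bit I of Table holds the result for (Op0, Op1) == ((I >> 1) & 1, I & 1);
// the case comments below list the bits from Table[3] down to Table[0].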
3049 switch (Table.to_ulong()) {
3050 case 0: // 0 0 0 0
3051 return FoldConstant(false);
3052 case 1: // 0 0 0 1
3053 return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) : nullptr;
3054 case 2: // 0 0 1 0
3055 return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) : nullptr;
3056 case 3: // 0 0 1 1
3057 return Builder.CreateNot(Op0);
3058 case 4: // 0 1 0 0
3059 return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) : nullptr;
3060 case 5: // 0 1 0 1
3061 return Builder.CreateNot(Op1);
3062 case 6: // 0 1 1 0
3063 return Builder.CreateXor(Op0, Op1);
3064 case 7: // 0 1 1 1
3065 return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) : nullptr;
3066 case 8: // 1 0 0 0
3067 return Builder.CreateAnd(Op0, Op1);
3068 case 9: // 1 0 0 1
3069 return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) : nullptr;
3070 case 10: // 1 0 1 0
3071 return Op1;
3072 case 11: // 1 0 1 1
3073 return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) : nullptr;
3074 case 12: // 1 1 0 0
3075 return Op0;
3076 case 13: // 1 1 0 1
3077 return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) : nullptr;
3078 case 14: // 1 1 1 0
3079 return Builder.CreateOr(Op0, Op1);
3080 case 15: // 1 1 1 1
3081 return FoldConstant(true);
3082 default:
3083 llvm_unreachable("Invalid Operation");
3084 }
3085 return nullptr;
3086}
3087
3088Instruction *InstCombinerImpl::foldICmpBinOpWithConstantViaTruthTable(
3089 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3090 Value *A, *B;
3091 Constant *C1, *C2, *C3, *C4;
3092 if (!(match(BO->getOperand(0),
3093 m_Select(m_Value(A), m_Constant(C1), m_Constant(C2)))) ||
3094 !match(BO->getOperand(1),
3095 m_Select(m_Value(B), m_Constant(C3), m_Constant(C4))) ||
3096 Cmp.getType() != A->getType() || Cmp.getType() != B->getType())
3097 return nullptr;
3098
3099 std::bitset<4> Table;
3100 auto ComputeTable = [&](bool First, bool Second) -> std::optional<bool> {
3101 Constant *L = First ? C1 : C2;
3102 Constant *R = Second ? C3 : C4;
3103 if (auto *Res = ConstantFoldBinaryOpOperands(BO->getOpcode(), L, R, DL)) {
3104 auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
3105 if (auto *CI = dyn_cast_or_null<ConstantInt>(Val))
3106 return ICmpInst::compare(CI->getValue(), C, Cmp.getPredicate());
3107 }
3108 return std::nullopt;
3109 };
3110
3111 for (unsigned I = 0; I < 4; ++I) {
3112 bool First = (I >> 1) & 1;
3113 bool Second = I & 1;
3114 if (auto Res = ComputeTable(First, Second))
3115 Table[I] = *Res;
3116 else
3117 return nullptr;
3118 }
3119
3120 // Synthesize optimal logic.
3121 if (auto *Cond = createLogicFromTable(Table, A, B, Builder, BO->hasOneUse()))
3122 return replaceInstUsesWith(Cmp, Cond);
3123 return nullptr;
3124}
3125
3126/// Fold icmp (add X, Y), C.
3127Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
3128 BinaryOperator *Add,
3129 const APInt &C) {
3130 Value *Y = Add->getOperand(1);
3131 Value *X = Add->getOperand(0);
3132
3133 Value *Op0, *Op1;
3134 Instruction *Ext0, *Ext1;
3135 const CmpPredicate Pred = Cmp.getCmpPredicate();
3136 if (match(Add,
3137 m_Add(m_CombineAnd(m_Instruction(Ext0), m_ZExtOrSExt(m_Value(Op0))),
3138 m_CombineAnd(m_Instruction(Ext1),
3139 m_ZExtOrSExt(m_Value(Op1))))) &&
3140 Op0->getType()->isIntOrIntVectorTy(1) &&
3141 Op1->getType()->isIntOrIntVectorTy(1)) {
3142 unsigned BW = C.getBitWidth();
3143 std::bitset<4> Table;
3144 auto ComputeTable = [&](bool Op0Val, bool Op1Val) {
3145 APInt Res(BW, 0);
3146 if (Op0Val)
3147 Res += APInt(BW, isa<ZExtInst>(Ext0) ? 1 : -1, /*isSigned=*/true);
3148 if (Op1Val)
3149 Res += APInt(BW, isa<ZExtInst>(Ext1) ? 1 : -1, /*isSigned=*/true);
3150 return ICmpInst::compare(Res, C, Pred);
3151 };
3152
3153 Table[0] = ComputeTable(false, false);
3154 Table[1] = ComputeTable(false, true);
3155 Table[2] = ComputeTable(true, false);
3156 Table[3] = ComputeTable(true, true);
3157 if (auto *Cond =
3158 createLogicFromTable(Table, Op0, Op1, Builder, Add->hasOneUse()))
3159 return replaceInstUsesWith(Cmp, Cond);
3160 }
3161 const APInt *C2;
3162 if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
3163 return nullptr;
3164
3165 // Fold icmp pred (add X, C2), C.
3166 Type *Ty = Add->getType();
3167
3168 // If the add does not wrap, we can always adjust the compare by subtracting
3169 // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
3170 // have been canonicalized to SGT/SLT/UGT/ULT.
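// Illustrative example: icmp ult (add nuw X, 5), 9 --> icmp ult X, 4.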
3171 if (Add->hasNoUnsignedWrap() &&
3172 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT)) {
3173 bool Overflow;
3174 APInt NewC = C.usub_ov(*C2, Overflow);
3175 // If there is overflow, the result must be true or false.
3176 if (!Overflow)
3177 // icmp Pred (add nuw X, C2), C --> icmp Pred X, (C - C2)
3178 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
3179 }
3180
3181 CmpInst::Predicate ChosenPred = Pred.getPreferredSignedPredicate();
3182
3183 if (Add->hasNoSignedWrap() &&
3184 (ChosenPred == ICmpInst::ICMP_SGT || ChosenPred == ICmpInst::ICMP_SLT)) {
3185 bool Overflow;
3186 APInt NewC = C.ssub_ov(*C2, Overflow);
3187 if (!Overflow)
3188 // icmp samesign ugt/ult (add nsw X, C2), C
3189 // -> icmp sgt/slt X, (C - C2)
3190 return new ICmpInst(ChosenPred, X, ConstantInt::get(Ty, NewC));
3191 }
3192
3193 if (ICmpInst::isUnsigned(Pred) && Add->hasNoSignedWrap() &&
3194 C.isNonNegative() && (C - *C2).isNonNegative() &&
3195 computeConstantRange(X, /*ForSigned=*/true).add(*C2).isAllNonNegative())
3196 return new ICmpInst(ICmpInst::getSignedPredicate(Pred), X,
3197 ConstantInt::get(Ty, C - *C2));
3198
3199 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
3200 const APInt &Upper = CR.getUpper();
3201 const APInt &Lower = CR.getLower();
3202 if (Cmp.isSigned()) {
3203 if (Lower.isSignMask())
3204 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
3205 if (Upper.isSignMask())
3206 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
3207 } else {
3208 if (Lower.isMinValue())
3209 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
3210 if (Upper.isMinValue())
3211 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
3212 }
3213
3214 // This set of folds is intentionally placed after folds that use no-wrapping
3215 // flags because those folds are likely better for later analysis/codegen.
3216 const APInt SMax = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
3217 const APInt SMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
3218
3219 // Fold compare with offset to opposite sign compare if it eliminates offset:
3220 // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
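// Illustrative example (i8): (X + 1) >u 128 --> X <s -1, since
// 128 == 1 + SMAX.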
3221 if (Pred == CmpInst::ICMP_UGT && C == *C2 + SMax)
3222 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
3223
3224 // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
3225 if (Pred == CmpInst::ICMP_ULT && C == *C2 + SMin)
3226 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
3227
3228 // (X + C2) >s C --> X <u (SMAX - C) (if C == C2 - 1)
3229 if (Pred == CmpInst::ICMP_SGT && C == *C2 - 1)
3230 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, SMax - C));
3231
3232 // (X + C2) <s C --> X >u (C ^ SMAX) (if C == C2)
3233 if (Pred == CmpInst::ICMP_SLT && C == *C2)
3234 return new ICmpInst(ICmpInst::ICMP_UGT, X, ConstantInt::get(Ty, C ^ SMax));
3235
3236 // (X + -1) <u C --> X <=u C (if X is known non-zero)
3237 if (Pred == CmpInst::ICMP_ULT && C2->isAllOnes()) {
3238 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3239 if (llvm::isKnownNonZero(X, Q))
3240 return new ICmpInst(ICmpInst::ICMP_ULE, X, ConstantInt::get(Ty, C));
3241 }
3242
3243 if (!Add->hasOneUse())
3244 return nullptr;
3245
3246 // X+C2 <u C -> (X & -C) == -C2
3247 // iff C2 & (C-1) == 0
3248 // C is a power of 2
3249 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
3250 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
3251 ConstantInt::get(Ty, -*C2));
3252
3253 // X+C2 <u C -> (X & C) != 2C
3254 // iff C == -(C2)
3255 // C2 is a power of 2
3256 if (Pred == ICmpInst::ICMP_ULT && C2->isPowerOf2() && C == -*C2)
3257 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, C),
3258 ConstantInt::get(Ty, C * 2));
3259
3260 // X+C2 >u C -> (X & ~C) != -C2
3261 // iff C2 & C == 0
3262 // C+1 is a power of 2
3263 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
3264 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
3265 ConstantInt::get(Ty, -*C2));
3266
3267 // The range test idiom can use either ult or ugt. Arbitrarily canonicalize
3268 // to the ult form.
3269 // X+C2 >u C -> X+(C2-C-1) <u ~C
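// Illustrative example (i8): X+10 >u 3 --> X+6 <u 252 (i.e. X+6 <u ~3).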
3270 if (Pred == ICmpInst::ICMP_UGT)
3271 return new ICmpInst(ICmpInst::ICMP_ULT,
3272 Builder.CreateAdd(X, ConstantInt::get(Ty, *C2 - C - 1)),
3273 ConstantInt::get(Ty, ~C));
3274
3275 // zext(V) + C2 pred C -> V + C3 pred' C4
3276 Value *V;
3277 if (match(X, m_ZExt(m_Value(V)))) {
3278 Type *NewCmpTy = V->getType();
3279 unsigned NewCmpBW = NewCmpTy->getScalarSizeInBits();
3280 if (shouldChangeType(Ty, NewCmpTy)) {
3281 ConstantRange SrcCR = CR.truncate(NewCmpBW, TruncInst::NoUnsignedWrap);
3282 CmpInst::Predicate EquivPred;
3283 APInt EquivInt;
3284 APInt EquivOffset;
3285
3286 SrcCR.getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
3287 return new ICmpInst(
3288 EquivPred,
3289 EquivOffset.isZero()
3290 ? V
3291 : Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
3292 ConstantInt::get(NewCmpTy, EquivInt));
3293 }
3294 }
3295
3296 return nullptr;
3297}
3298
3299static bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
3300 Value *&RHS, ConstantInt *&Less,
3301 ConstantInt *&Equal,
3302 ConstantInt *&Greater) {
3303 // TODO: Generalize this to work with other comparison idioms or ensure
3304 // they get canonicalized into this form.
3305
3306 // select i1 (a == b),
3307 // i32 Equal,
3308 // i32 (select i1 (a < b), i32 Less, i32 Greater)
3309 // where Equal, Less and Greater are placeholders for any three constants.
3310 CmpPredicate PredA;
3311 if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
3312 !ICmpInst::isEquality(PredA))
3313 return false;
3314 Value *EqualVal = SI->getTrueValue();
3315 Value *UnequalVal = SI->getFalseValue();
3316 // We can still get a non-canonical predicate here, so canonicalize.
3317 if (PredA == ICmpInst::ICMP_NE)
3318 std::swap(EqualVal, UnequalVal);
3319 if (!match(EqualVal, m_ConstantInt(Equal)))
3320 return false;
3321 CmpPredicate PredB;
3322 Value *LHS2, *RHS2;
3323 if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
3324 m_ConstantInt(Less), m_ConstantInt(Greater))))
3325 return false;
3326 // We can get predicate mismatch here, so canonicalize if possible:
3327 // First, ensure that 'LHS' matches.
3328 if (LHS2 != LHS) {
3329 // x sgt y <--> y slt x
3330 std::swap(LHS2, RHS2);
3331 PredB = ICmpInst::getSwappedPredicate(PredB);
3332 }
3333 if (LHS2 != LHS)
3334 return false;
3335 // We also need to canonicalize 'RHS'.
3336 if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
3337 // x sgt C-1 <--> x sge C <--> not(x slt C)
3338 auto FlippedStrictness =
3339 getFlippedStrictnessPredicateAndConstant(PredB, cast<Constant>(RHS2));
3340 if (!FlippedStrictness)
3341 return false;
3342 assert(FlippedStrictness->first == ICmpInst::ICMP_SGE &&
3343 "basic correctness failure");
3344 RHS2 = FlippedStrictness->second;
3345 // And kind-of perform the result swap.
3346 std::swap(Less, Greater);
3347 PredB = ICmpInst::ICMP_SLT;
3348 }
3349 return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
3350}
3351
3352Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
3353 SelectInst *Select,
3354 ConstantInt *C) {
3355
3356 assert(C && "Cmp RHS should be a constant int!");
3357 // If we're testing a constant value against the result of a three way
3358 // comparison, the result can be expressed directly in terms of the
3359 // original values being compared. Note: We could possibly be more
3360 // aggressive here and remove the hasOneUse test. The original select is
3361 // really likely to simplify or sink when we remove a test of the result.
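// Illustrative example: for a three-way compare yielding -1/0/1 for
// less/equal/greater, (result == -1) reduces to (OrigLHS s< OrigRHS),
// since only the less-than constant satisfies the predicate.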
3362 Value *OrigLHS, *OrigRHS;
3363 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3364 if (Cmp.hasOneUse() &&
3365 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
3366 C3GreaterThan)) {
3367 assert(C1LessThan && C2Equal && C3GreaterThan);
3368
3369 bool TrueWhenLessThan = ICmpInst::compare(
3370 C1LessThan->getValue(), C->getValue(), Cmp.getPredicate());
3371 bool TrueWhenEqual = ICmpInst::compare(C2Equal->getValue(), C->getValue(),
3372 Cmp.getPredicate());
3373 bool TrueWhenGreaterThan = ICmpInst::compare(
3374 C3GreaterThan->getValue(), C->getValue(), Cmp.getPredicate());
3375
3376 // This generates the new instruction that will replace the original Cmp
3377 // Instruction. Instead of enumerating the various combinations when
3378 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
3379 // false, we rely on chaining of ORs and future passes of InstCombine to
3380 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
3381
3382 // When none of the three constants satisfy the predicate for the RHS (C),
3383 // the entire original Cmp can be simplified to a false.
3384 Value *Cond = Builder.getFalse();
3385 if (TrueWhenLessThan)
3386 Cond = Builder.CreateOr(
3387 Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
3388 if (TrueWhenEqual)
3389 Cond = Builder.CreateOr(
3390 Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
3391 if (TrueWhenGreaterThan)
3392 Cond = Builder.CreateOr(
3393 Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
3394
3395 return replaceInstUsesWith(Cmp, Cond);
3396 }
3397 return nullptr;
3398}
3399
3400Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
3401 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
3402 if (!Bitcast)
3403 return nullptr;
3404
3405 ICmpInst::Predicate Pred = Cmp.getPredicate();
3406 Value *Op1 = Cmp.getOperand(1);
3407 Value *BCSrcOp = Bitcast->getOperand(0);
3408 Type *SrcType = Bitcast->getSrcTy();
3409 Type *DstType = Bitcast->getType();
3410
3411 // Make sure the bitcast doesn't change between scalar and vector and
3412 // doesn't change the number of vector elements.
3413 if (SrcType->isVectorTy() == DstType->isVectorTy() &&
3414 SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
3415 // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
3416 Value *X;
3417 if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
3418 // icmp eq (bitcast (sitofp X)), 0 --> icmp eq X, 0
3419 // icmp ne (bitcast (sitofp X)), 0 --> icmp ne X, 0
3420 // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
3421 // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
3422 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
3423 Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
3424 match(Op1, m_Zero()))
3425 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3426
3427 // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
3428 if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
3429 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
3430
3431 // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
3432 if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
3433 return new ICmpInst(Pred, X,
3434 ConstantInt::getAllOnesValue(X->getType()));
3435 }
3436
3437 // Zero-equality checks are preserved through unsigned floating-point casts:
3438 // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
3439 // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
3440 if (match(BCSrcOp, m_UIToFP(m_Value(X))))
3441 if (Cmp.isEquality() && match(Op1, m_Zero()))
3442 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3443
3444 const APInt *C;
3445 bool TrueIfSigned;
3446 if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse()) {
3447 // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
3448 // the FP extend/truncate because that cast does not change the sign-bit.
3449 // This is true for all standard IEEE-754 types and the X86 80-bit type.
3450 // The sign-bit is always the most significant bit in those types.
3451 if (isSignBitCheck(Pred, *C, TrueIfSigned) &&
3452 (match(BCSrcOp, m_FPExt(m_Value(X))) ||
3453 match(BCSrcOp, m_FPTrunc(m_Value(X))))) {
3454 // (bitcast (fpext/fptrunc X)) to iX) < 0 --> (bitcast X to iY) < 0
3455 // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1
3456 Type *XType = X->getType();
3457
3458 // We can't currently handle PowerPC-style (ppc_fp128) floating point here.
3459 if (!(XType->isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3460 Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
3461 if (auto *XVTy = dyn_cast<VectorType>(XType))
3462 NewType = VectorType::get(NewType, XVTy->getElementCount());
3463 Value *NewBitcast = Builder.CreateBitCast(X, NewType);
3464 if (TrueIfSigned)
3465 return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
3466 ConstantInt::getNullValue(NewType));
3467 else
3468 return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
3469 ConstantInt::getAllOnesValue(NewType));
3470 }
3471 }
3472
3473 // icmp eq/ne (bitcast X to int), special fp -> llvm.is.fpclass(X, class)
3474 Type *FPType = SrcType->getScalarType();
3475 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3476 Attribute::NoImplicitFloat) &&
3477 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3478 FPClassTest Mask = APFloat(FPType->getFltSemantics(), *C).classify();
3479 if (Mask & (fcInf | fcZero)) {
3480 if (Pred == ICmpInst::ICMP_NE)
3481 Mask = ~Mask;
3482 return replaceInstUsesWith(Cmp,
3483 Builder.createIsFPClass(BCSrcOp, Mask));
3484 }
3485 }
3486 }
3487 }
3488
3489 const APInt *C;
3490 if (!match(Cmp.getOperand(1), m_APInt(C)) || !DstType->isIntegerTy() ||
3491 !SrcType->isIntOrIntVectorTy())
3492 return nullptr;
3493
3494 // If this is checking if all elements of a vector compare are set or not,
3495 // invert the casted vector equality compare and test if all compare
3496 // elements are clear or not. A compare against zero is generally easier for
3497 // analysis and codegen.
3498 // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
3499 // Example: are all elements equal? --> are zero elements not equal?
3500 // TODO: Try harder to reduce compare of 2 freely invertible operands?
3501 if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse()) {
3502 if (Value *NotBCSrcOp =
3503 getFreelyInverted(BCSrcOp, BCSrcOp->hasOneUse(), &Builder)) {
3504 Value *Cast = Builder.CreateBitCast(NotBCSrcOp, DstType);
3505 return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
3506 }
3507 }
3508
3509 // If this is checking if all elements of an extended vector are clear or not,
3510 // compare in a narrow type to eliminate the extend:
3511 // icmp eq/ne (bitcast (ext X) to iN), 0 --> icmp eq/ne (bitcast X to iM), 0
3512 Value *X;
3513 if (Cmp.isEquality() && C->isZero() && Bitcast->hasOneUse() &&
3514 match(BCSrcOp, m_ZExtOrSExt(m_Value(X)))) {
3515 if (auto *VecTy = dyn_cast<FixedVectorType>(X->getType())) {
3516 Type *NewType = Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3517 Value *NewCast = Builder.CreateBitCast(X, NewType);
3518 return new ICmpInst(Pred, NewCast, ConstantInt::getNullValue(NewType));
3519 }
3520 }
3521
3522 // Folding: icmp <pred> iN X, C
3523 // where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN
3524 // and C is a splat of a K-bit pattern
3525 // and SC is a constant vector = <C', C', C', ..., C'>
3526 // Into:
3527 // %E = extractelement <M x iK> %vec, i32 C'
3528 // icmp <pred> iK %E, trunc(C)
3529 Value *Vec;
3530 ArrayRef<int> Mask;
3531 if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
3532 // Check whether every element of Mask is the same constant
3533 if (all_equal(Mask)) {
3534 auto *VecTy = cast<VectorType>(SrcType);
3535 auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3536 if (C->isSplat(EltTy->getBitWidth())) {
3537 // Fold the icmp based on the value of C
3538 // If C is M copies of an iK sized bit pattern,
3539 // then:
3540 // => %E = extractelement <N x iK> %vec, i32 Elem
3541 // icmp <pred> iK %SplatVal, <pattern>
3542 Value *Elem = Builder.getInt32(Mask[0]);
3543 Value *Extract = Builder.CreateExtractElement(Vec, Elem);
3544 Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
3545 return new ICmpInst(Pred, Extract, NewC);
3546 }
3547 }
3548 }
3549 return nullptr;
3550}
3551
3552/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3553/// where X is some kind of instruction.
3554Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
3555 const APInt *C;
3556
3557 if (match(Cmp.getOperand(1), m_APInt(C))) {
3558 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3559 if (Instruction *I = foldICmpBinOpWithConstant(Cmp, BO, *C))
3560 return I;
3561
3562 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3563 // For now, we only support constant integers while folding the
3564 // ICMP(SELECT) pattern. We can extend this to support vectors of integers
3565 // similar to the cases handled by binary ops above.
3566 if (auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3567 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
3568 return I;
3569
3570 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3571 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
3572 return I;
3573
3574 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3575 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
3576 return I;
3577
3578 // (extractval ([s/u]subo X, Y), 0) == 0 --> X == Y
3579 // (extractval ([s/u]subo X, Y), 0) != 0 --> X != Y
3580 // TODO: This checks one-use, but that is not strictly necessary.
3581 Value *Cmp0 = Cmp.getOperand(0);
3582 Value *X, *Y;
3583 if (C->isZero() && Cmp.isEquality() && Cmp0->hasOneUse() &&
3584 (match(Cmp0,
3585 m_ExtractValue<0>(m_Intrinsic<Intrinsic::ssub_with_overflow>(
3586 m_Value(X), m_Value(Y)))) ||
3587 match(Cmp0,
3588 m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
3589 m_Value(X), m_Value(Y))))))
3590 return new ICmpInst(Cmp.getPredicate(), X, Y);
3591 }
3592
3593 if (match(Cmp.getOperand(1), m_APIntAllowPoison(C)))
3594 return foldICmpInstWithConstantAllowPoison(Cmp, *C);
3595
3596 return nullptr;
3597}
3598
3599/// Fold an icmp equality instruction with binary operator LHS and constant RHS:
3600/// icmp eq/ne BO, C.
3601Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
3602 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3603 // TODO: Some of these folds could work with arbitrary constants, but this
3604 // function is limited to scalar and vector splat constants.
3605 if (!Cmp.isEquality())
3606 return nullptr;
3607
3608 ICmpInst::Predicate Pred = Cmp.getPredicate();
3609 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
3610 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
3611 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
3612
3613 switch (BO->getOpcode()) {
3614 case Instruction::SRem:
3615 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
3616 if (C.isZero() && BO->hasOneUse()) {
3617 const APInt *BOC;
3618 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
3619 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
3620 return new ICmpInst(Pred, NewRem,
3621 Constant::getNullValue(BO->getType()));
3622 }
3623 }
3624 break;
3625 case Instruction::Add: {
3626 // (A + C2) == C --> A == (C - C2)
3627 // (A + C2) != C --> A != (C - C2)
3628 // TODO: Remove the one-use limitation? See discussion in D58633.
3629 if (Constant *C2 = dyn_cast<Constant>(BOp1)) {
3630 if (BO->hasOneUse())
3631 return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, C2));
3632 } else if (C.isZero()) {
3633 // Replace ((add A, B) != 0) with (A != -B) if A or B is
3634 // efficiently invertible, or if the add has just this one use.
3635 if (Value *NegVal = dyn_castNegVal(BOp1))
3636 return new ICmpInst(Pred, BOp0, NegVal);
3637 if (Value *NegVal = dyn_castNegVal(BOp0))
3638 return new ICmpInst(Pred, NegVal, BOp1);
3639 if (BO->hasOneUse()) {
3640 // (add nuw A, B) != 0 -> (or A, B) != 0
3641 if (match(BO, m_NUWAdd(m_Value(), m_Value()))) {
3642 Value *Or = Builder.CreateOr(BOp0, BOp1);
3643 return new ICmpInst(Pred, Or, Constant::getNullValue(BO->getType()));
3644 }
3645 Value *Neg = Builder.CreateNeg(BOp1);
3646 Neg->takeName(BO);
3647 return new ICmpInst(Pred, BOp0, Neg);
3648 }
3649 }
3650 break;
3651 }
3652 case Instruction::Xor:
3653 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3654 // For the xor case, we can xor two constants together, eliminating
3655 // the explicit xor.
3656 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3657 } else if (C.isZero()) {
3658 // Replace ((xor A, B) != 0) with (A != B)
3659 return new ICmpInst(Pred, BOp0, BOp1);
3660 }
3661 break;
3662 case Instruction::Or: {
3663 const APInt *BOC;
3664 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3665 // Comparing if all bits outside of a constant mask are set?
3666 // Replace (X | C) == -1 with (X & ~C) == ~C.
3667 // This removes the -1 constant.
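// Illustrative example: (X | 0x0F) == -1 --> (X & 0xF0) == 0xF0.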
3668 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3669 Value *And = Builder.CreateAnd(BOp0, NotBOC);
3670 return new ICmpInst(Pred, And, NotBOC);
3671 }
3672 // (icmp eq (or (select cond, 0, NonZero), Other), 0)
3673 // -> (and cond, (icmp eq Other, 0))
3674 // (icmp ne (or (select cond, NonZero, 0), Other), 0)
3675 // -> (or cond, (icmp ne Other, 0))
3676 Value *Cond, *TV, *FV, *Other, *Sel;
3677 if (C.isZero() &&
3678 match(BO,
3679 m_OneUse(m_c_Or(m_CombineAnd(m_Value(Sel),
3680 m_Select(m_Value(Cond), m_Value(TV),
3681 m_Value(FV))),
3682 m_Value(Other)))) &&
3683 Cond->getType() == Cmp.getType()) {
3684 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3685 // Easy case is if eq/ne matches whether 0 is trueval/falseval.
3686 if (Pred == ICmpInst::ICMP_EQ
3687 ? (match(TV, m_Zero()) && isKnownNonZero(FV, Q))
3688 : (match(FV, m_Zero()) && isKnownNonZero(TV, Q))) {
3689 Value *Cmp = Builder.CreateICmp(
3690 Pred, Other, Constant::getNullValue(Other->getType()));
3691 return BinaryOperator::Create(
3692 Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3693 Cond);
3694 }
3695 // Harder case is if eq/ne matches whether 0 is falseval/trueval. In this
3696 // case we need to invert the select condition so we need to be careful to
3697 // avoid creating extra instructions.
3698 // (icmp ne (or (select cond, 0, NonZero), Other), 0)
3699 // -> (or (not cond), (icmp ne Other, 0))
3700 // (icmp eq (or (select cond, NonZero, 0), Other), 0)
3701 // -> (and (not cond), (icmp eq Other, 0))
3702 //
3703 // Only do this if the inner select has one use, in which case we are
3704 // replacing `select` with `(not cond)`. Otherwise, we will create more
3705 // uses. NB: Trying to freely invert cond doesn't make sense here, as if
3706 // cond was freely invertable, the select arms would have been inverted.
3707 if (Sel->hasOneUse() &&
3708 (Pred == ICmpInst::ICMP_EQ
3709 ? (match(FV, m_Zero()) && isKnownNonZero(TV, Q))
3710 : (match(TV, m_Zero()) && isKnownNonZero(FV, Q)))) {
3711 Value *NotCond = Builder.CreateNot(Cond);
3712 Value *Cmp = Builder.CreateICmp(
3713 Pred, Other, Constant::getNullValue(Other->getType()));
3714 return BinaryOperator::Create(
3715 Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3716 NotCond);
3717 }
3718 }
3719 break;
3720 }
3721 case Instruction::UDiv:
3722 case Instruction::SDiv:
3723 if (BO->isExact()) {
3724 // div exact X, Y eq/ne 0 -> X eq/ne 0
3725 // div exact X, Y eq/ne 1 -> X eq/ne Y
3726 // div exact X, Y eq/ne C ->
3727 // if Y * C never-overflow && OneUse:
3728 // -> Y * C eq/ne X
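// Illustrative example: (udiv exact X, 3) == 5 --> (3 * 5) == X, i.e.
// X == 15, provided the multiply is known not to overflow.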
3729 if (C.isZero())
3730 return new ICmpInst(Pred, BOp0, Constant::getNullValue(BO->getType()));
3731 else if (C.isOne())
3732 return new ICmpInst(Pred, BOp0, BOp1);
3733 else if (BO->hasOneUse()) {
3734 OverflowResult OR = computeOverflow(
3735 Instruction::Mul, BO->getOpcode() == Instruction::SDiv, BOp1,
3736 Cmp.getOperand(1), BO);
3737 if (OR == OverflowResult::NeverOverflows) {
3738 Value *YC =
3739 Builder.CreateMul(BOp1, ConstantInt::get(BO->getType(), C));
3740 return new ICmpInst(Pred, YC, BOp0);
3741 }
3742 }
3743 }
3744 if (BO->getOpcode() == Instruction::UDiv && C.isZero()) {
3745 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3746 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3747 return new ICmpInst(NewPred, BOp1, BOp0);
3748 }
3749 break;
3750 default:
3751 break;
3752 }
3753 return nullptr;
3754}
3755
3756static Instruction *foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs,
3757 const APInt &CRhs,
3758 InstCombiner::BuilderTy &Builder,
3759 const SimplifyQuery &Q) {
3760 assert(CtpopLhs->getIntrinsicID() == Intrinsic::ctpop &&
3761 "Non-ctpop intrin in ctpop fold");
3762 if (!CtpopLhs->hasOneUse())
3763 return nullptr;
3764
3765 // Power of 2 test:
3766 // isPow2OrZero : ctpop(X) u< 2
3767 // isPow2 : ctpop(X) == 1
3768 // NotPow2OrZero: ctpop(X) u> 1
3769 // NotPow2 : ctpop(X) != 1
3770 // If we know any bit of X can be folded to:
3771 // IsPow2 : X & (~Bit) == 0
3772 // NotPow2 : X & (~Bit) != 0
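// Illustrative example: if bit 3 of X is known set (Bit == 0b1000), then
// ctpop(X) == 1 folds to (X & ~0b1000) == 0.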
3773 const ICmpInst::Predicate Pred = I.getPredicate();
3774 if (((I.isEquality() || Pred == ICmpInst::ICMP_UGT) && CRhs == 1) ||
3775 (Pred == ICmpInst::ICMP_ULT && CRhs == 2)) {
3776 Value *Op = CtpopLhs->getArgOperand(0);
3777 KnownBits OpKnown = computeKnownBits(Op, Q.DL, Q.AC, Q.CxtI, Q.DT);
3778 // No need to check for count > 1; that should already be constant-folded.
3779 if (OpKnown.countMinPopulation() == 1) {
3780 Value *And = Builder.CreateAnd(
3781 Op, Constant::getIntegerValue(Op->getType(), ~(OpKnown.One)));
3782 return new ICmpInst(
3783 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_ULT)
3784 ? ICmpInst::ICMP_EQ
3785 : ICmpInst::ICMP_NE,
3786 And, Constant::getNullValue(Op->getType()));
3787 }
3788 }
3789
3790 return nullptr;
3791}
3792
3793/// Fold an equality icmp with LLVM intrinsic and constant operand.
3794Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3795 ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3796 Type *Ty = II->getType();
3797 unsigned BitWidth = C.getBitWidth();
3798 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3799
3800 switch (II->getIntrinsicID()) {
3801 case Intrinsic::abs:
3802 // abs(A) == 0 -> A == 0
3803 // abs(A) == INT_MIN -> A == INT_MIN
3804 if (C.isZero() || C.isMinSignedValue())
3805 return new ICmpInst(Pred, II->getArgOperand(0), ConstantInt::get(Ty, C));
3806 break;
3807
3808 case Intrinsic::bswap:
3809 // bswap(A) == C -> A == bswap(C)
3810 return new ICmpInst(Pred, II->getArgOperand(0),
3811 ConstantInt::get(Ty, C.byteSwap()));
3812
3813 case Intrinsic::bitreverse:
3814 // bitreverse(A) == C -> A == bitreverse(C)
3815 return new ICmpInst(Pred, II->getArgOperand(0),
3816 ConstantInt::get(Ty, C.reverseBits()));
3817
3818 case Intrinsic::ctlz:
3819 case Intrinsic::cttz: {
3820 // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
3821 if (C == BitWidth)
3822 return new ICmpInst(Pred, II->getArgOperand(0),
3823 ConstantInt::getNullValue(Ty));
3824
3825 // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3826 // and Mask1 has bits 0..C set. Similar for ctlz, but for high bits.
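// Illustrative example (i8): cttz(A) == 2 --> (A & 0b00000111) == 0b00000100.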
3827 // Limit to one use to ensure we don't increase instruction count.
3828 unsigned Num = C.getLimitedValue(BitWidth);
3829 if (Num != BitWidth && II->hasOneUse()) {
3830 bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3831 APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3832 : APInt::getHighBitsSet(BitWidth, Num + 1);
3833 APInt Mask2 = IsTrailing
3834 ? APInt::getOneBitSet(BitWidth, Num)
3835 : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3836 return new ICmpInst(Pred, Builder.CreateAnd(II->getArgOperand(0), Mask1),
3837 ConstantInt::get(Ty, Mask2));
3838 }
3839 break;
3840 }
3841
3842 case Intrinsic::ctpop: {
3843 // popcount(A) == 0 -> A == 0 and likewise for !=
3844 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
3845 bool IsZero = C.isZero();
3846 if (IsZero || C == BitWidth)
3847 return new ICmpInst(Pred, II->getArgOperand(0),
3848 IsZero ? Constant::getNullValue(Ty)
3849 : Constant::getAllOnesValue(Ty));
3850
3851 break;
3852 }
3853
3854 case Intrinsic::fshl:
3855 case Intrinsic::fshr:
3856 if (II->getArgOperand(0) == II->getArgOperand(1)) {
3857 const APInt *RotAmtC;
3858 // ror(X, RotAmtC) == C --> X == rol(C, RotAmtC)
3859 // rol(X, RotAmtC) == C --> X == ror(C, RotAmtC)
3860 if (match(II->getArgOperand(2), m_APInt(RotAmtC)))
3861 return new ICmpInst(Pred, II->getArgOperand(0),
3862 II->getIntrinsicID() == Intrinsic::fshl
3863 ? ConstantInt::get(Ty, C.rotr(*RotAmtC))
3864 : ConstantInt::get(Ty, C.rotl(*RotAmtC)));
3865 }
3866 break;
3867
3868 case Intrinsic::umax:
3869 case Intrinsic::uadd_sat: {
3870 // uadd.sat(a, b) == 0 -> (a | b) == 0
3871 // umax(a, b) == 0 -> (a | b) == 0
3872 if (C.isZero() && II->hasOneUse()) {
3873 Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3874 return new ICmpInst(Pred, Or, Constant::getNullValue(Ty));
3875 }
3876 break;
3877 }
3878
3879 case Intrinsic::ssub_sat:
3880 // ssub.sat(a, b) == 0 -> a == b
3881 //
3882 // Note this doesn't work for ssub.sat.i1 because ssub.sat.i1 0, -1 = 0
3883 // (because 1 saturates to 0). Just skip the optimization for i1.
3884 if (C.isZero() && II->getType()->getScalarSizeInBits() > 1)
3885 return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
3886 break;
3887 case Intrinsic::usub_sat: {
3888 // usub.sat(a, b) == 0 -> a <= b
3889 if (C.isZero()) {
3890 ICmpInst::Predicate NewPred =
3891 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3892 return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3893 }
3894 break;
3895 }
3896 default:
3897 break;
3898 }
3899
3900 return nullptr;
3901}
3902
3903/// Fold an icmp with LLVM intrinsics
3904static Instruction *
3905foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp,
3906 InstCombiner::BuilderTy &Builder) {
3907 assert(Cmp.isEquality());
3908
3909 ICmpInst::Predicate Pred = Cmp.getPredicate();
3910 Value *Op0 = Cmp.getOperand(0);
3911 Value *Op1 = Cmp.getOperand(1);
3912 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3913 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3914 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3915 return nullptr;
3916
3917 switch (IIOp0->getIntrinsicID()) {
3918 case Intrinsic::bswap:
3919 case Intrinsic::bitreverse:
3920 // If both operands are byte-swapped or bit-reversed, just compare the
3921 // original values.
3922 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3923 case Intrinsic::fshl:
3924 case Intrinsic::fshr: {
3925 // If both operands are rotated by same amount, just compare the
3926 // original values.
3927 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3928 break;
3929 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3930 break;
3931 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3932 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3933
3934 // rotate(X, AmtX) == rotate(Y, AmtY)
3935 // -> rotate(X, AmtX - AmtY) == Y
3936 // Do this if either both rotates have one use or if only one has one use
3937 // and AmtX/AmtY are constants.
3938 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3939 if (OneUses == 2 ||
3940 (OneUses == 1 && match(IIOp0->getOperand(2), m_ImmConstant()) &&
3941 match(IIOp1->getOperand(2), m_ImmConstant()))) {
3942 Value *SubAmt =
3943 Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3944 Value *CombinedRotate = Builder.CreateIntrinsic(
3945 Op0->getType(), IIOp0->getIntrinsicID(),
3946 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3947 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3948 }
3949 } break;
3950 default:
3951 break;
3952 }
3953
3954 return nullptr;
3955}
3956
3957/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3958/// where X is some kind of instruction and C is AllowPoison.
3959/// TODO: Move more folds which allow poison to this function.
3960Instruction *
3961InstCombinerImpl::foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
3962 const APInt &C) {
3963 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3964 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3965 switch (II->getIntrinsicID()) {
3966 default:
3967 break;
3968 case Intrinsic::fshl:
3969 case Intrinsic::fshr:
3970 if (Cmp.isEquality() && II->getArgOperand(0) == II->getArgOperand(1)) {
3971 // (rot X, ?) == 0/-1 --> X == 0/-1
3972 if (C.isZero() || C.isAllOnes())
3973 return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1));
3974 }
3975 break;
3976 }
3977 }
3978
3979 return nullptr;
3980}
3981
3982/// Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
3983Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp,
3984 BinaryOperator *BO,
3985 const APInt &C) {
3986 switch (BO->getOpcode()) {
3987 case Instruction::Xor:
3988 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
3989 return I;
3990 break;
3991 case Instruction::And:
3992 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
3993 return I;
3994 break;
3995 case Instruction::Or:
3996 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
3997 return I;
3998 break;
3999 case Instruction::Mul:
4000 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
4001 return I;
4002 break;
4003 case Instruction::Shl:
4004 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
4005 return I;
4006 break;
4007 case Instruction::LShr:
4008 case Instruction::AShr:
4009 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
4010 return I;
4011 break;
4012 case Instruction::SRem:
4013 if (Instruction *I = foldICmpSRemConstant(Cmp, BO, C))
4014 return I;
4015 break;
4016 case Instruction::UDiv:
4017 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
4018 return I;
4019 [[fallthrough]];
4020 case Instruction::SDiv:
4021 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
4022 return I;
4023 break;
4024 case Instruction::Sub:
4025 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
4026 return I;
4027 break;
4028 case Instruction::Add:
4029 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
4030 return I;
4031 break;
4032 default:
4033 break;
4034 }
4035
4036 // TODO: These folds could be refactored to be part of the above calls.
4037 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
4038 return I;
4039
4040 // Fall back to handling `icmp pred (select A ? C1 : C2) binop (select B ? C3
4041 // : C4), C5` pattern, by computing a truth table of the four constant
4042 // variants.
4043 return foldICmpBinOpWithConstantViaTruthTable(Cmp, BO, C);
4044}
4045
4046static Instruction *
4047foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II,
4048 const APInt &C,
4049 InstCombiner::BuilderTy &Builder) {
4050 // This transform may end up producing more than one instruction for the
4051 // intrinsic, so limit it to one user of the intrinsic.
4052 if (!II->hasOneUse())
4053 return nullptr;
4054
4055 // Let Y = [add/sub]_sat(X, C) pred C2
4056 // SatVal = The saturating value for the operation
4057 // WillWrap = Whether or not the operation will underflow / overflow
4058 // => Y = (WillWrap ? SatVal : (X binop C)) pred C2
4059 // => Y = WillWrap ? (SatVal pred C2) : ((X binop C) pred C2)
4060 //
4061 // When (SatVal pred C2) is true, then
4062 // Y = WillWrap ? true : ((X binop C) pred C2)
4063 // => Y = WillWrap || ((X binop C) pred C2)
4064 // else
4065 // Y = WillWrap ? false : ((X binop C) pred C2)
4066 // => Y = !WillWrap ? ((X binop C) pred C2) : false
4067 // => Y = !WillWrap && ((X binop C) pred C2)
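// Illustrative example: (usub.sat(X, 5) >u 10) becomes
// !(X <u 5) && ((X - 5) >u 10), which simplifies to X >u 15.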
4068 Value *Op0 = II->getOperand(0);
4069 Value *Op1 = II->getOperand(1);
4070
4071 const APInt *COp1;
4072 // This transform only works when the intrinsic has an integral constant or
4073 // splat vector as the second operand.
4074 if (!match(Op1, m_APInt(COp1)))
4075 return nullptr;
4076
4077 APInt SatVal;
4078 switch (II->getIntrinsicID()) {
4079 default:
4081 "This function only works with usub_sat and uadd_sat for now!");
4082 case Intrinsic::uadd_sat:
4083 SatVal = APInt::getAllOnes(C.getBitWidth());
4084 break;
4085 case Intrinsic::usub_sat:
4086 SatVal = APInt::getZero(C.getBitWidth());
4087 break;
4088 }
4089
4090 // Check (SatVal pred C2)
4091 bool SatValCheck = ICmpInst::compare(SatVal, C, Pred);
4092
4093 // !WillWrap.
4094 ConstantRange C1 = ConstantRange::makeExactNoWrapRegion(
4095 II->getBinaryOp(), *COp1, II->getNoWrapKind());
4096
4096
4097 // WillWrap.
4098 if (SatValCheck)
4099 C1 = C1.inverse();
4100
4101 ConstantRange C2 = ConstantRange::makeExactICmpRegion(Pred, C);
4102 if (II->getBinaryOp() == Instruction::Add)
4103 C2 = C2.sub(*COp1);
4104 else
4105 C2 = C2.add(*COp1);
4106
4107 Instruction::BinaryOps CombiningOp =
4108 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4109
4110 std::optional<ConstantRange> Combination;
4111 if (CombiningOp == Instruction::BinaryOps::Or)
4112 Combination = C1.exactUnionWith(C2);
4113 else /* CombiningOp == Instruction::BinaryOps::And */
4114 Combination = C1.exactIntersectWith(C2);
4115
4116 if (!Combination)
4117 return nullptr;
4118
4119 CmpInst::Predicate EquivPred;
4120 APInt EquivInt;
4121 APInt EquivOffset;
4122
4123 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4124
4125 return new ICmpInst(
4126 EquivPred,
4127 Builder.CreateAdd(Op0, ConstantInt::get(Op1->getType(), EquivOffset)),
4128 ConstantInt::get(Op1->getType(), EquivInt));
4129}
4130
4131static Instruction *
4132foldICmpOfCmpIntrinsicWithConstant(ICmpInst::Predicate Pred, IntrinsicInst *I,
4133 const APInt &C,
4134 InstCombiner::BuilderTy &Builder) {
4135 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4136 switch (Pred) {
4137 case ICmpInst::ICMP_EQ:
4138 case ICmpInst::ICMP_NE:
4139 if (C.isZero())
4140 NewPredicate = Pred;
4141 else if (C.isOne())
4142 NewPredicate =
4143 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
4144 else if (C.isAllOnes())
4145 NewPredicate =
4146 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
4147 break;
4148
4149 case ICmpInst::ICMP_SGT:
4150 if (C.isAllOnes())
4151 NewPredicate = ICmpInst::ICMP_UGE;
4152 else if (C.isZero())
4153 NewPredicate = ICmpInst::ICMP_UGT;
4154 break;
4155
4156 case ICmpInst::ICMP_SLT:
4157 if (C.isZero())
4158 NewPredicate = ICmpInst::ICMP_ULT;
4159 else if (C.isOne())
4160 NewPredicate = ICmpInst::ICMP_ULE;
4161 break;
4162
4163 case ICmpInst::ICMP_ULT:
4164 if (C.ugt(1))
4165 NewPredicate = ICmpInst::ICMP_UGE;
4166 break;
4167
4168 case ICmpInst::ICMP_UGT:
4169 if (!C.isZero() && !C.isAllOnes())
4170 NewPredicate = ICmpInst::ICMP_ULT;
4171 break;
4172
4173 default:
4174 break;
4175 }
4176
4177 if (!NewPredicate)
4178 return nullptr;
4179
4180 if (I->getIntrinsicID() == Intrinsic::scmp)
4181 NewPredicate = ICmpInst::getSignedPredicate(*NewPredicate);
4182 Value *LHS = I->getOperand(0);
4183 Value *RHS = I->getOperand(1);
4184 return new ICmpInst(*NewPredicate, LHS, RHS);
4185}
4186
4187/// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
4188Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
4189 IntrinsicInst *II,
4190 const APInt &C) {
4191 ICmpInst::Predicate Pred = Cmp.getPredicate();
4192
4193 // Handle folds that apply for any kind of icmp.
4194 switch (II->getIntrinsicID()) {
4195 default:
4196 break;
4197 case Intrinsic::uadd_sat:
4198 case Intrinsic::usub_sat:
4199 if (auto *Folded = foldICmpUSubSatOrUAddSatWithConstant(
4200 Pred, cast<SaturatingInst>(II), C, Builder))
4201 return Folded;
4202 break;
4203 case Intrinsic::ctpop: {
4204 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
4205 if (Instruction *R = foldCtpopPow2Test(Cmp, II, C, Builder, Q))
4206 return R;
4207 } break;
4208 case Intrinsic::scmp:
4209 case Intrinsic::ucmp:
4210 if (auto *Folded = foldICmpOfCmpIntrinsicWithConstant(Pred, II, C, Builder))
4211 return Folded;
4212 break;
4213 }
4214
4215 if (Cmp.isEquality())
4216 return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
4217
4218 Type *Ty = II->getType();
4219 unsigned BitWidth = C.getBitWidth();
4220 switch (II->getIntrinsicID()) {
4221 case Intrinsic::ctpop: {
4222 // (ctpop X > BitWidth - 1) --> X == -1
4223 Value *X = II->getArgOperand(0);
4224 if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
4225 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
4227 // (ctpop X < BitWidth) --> X != -1
4228 if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
4229 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
4230 ConstantInt::getAllOnesValue(Ty));
4231 break;
4232 }
4233 case Intrinsic::ctlz: {
4234 // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
4235 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4236 unsigned Num = C.getLimitedValue();
4237 APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
4238 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
4239 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4240 }
4241
4242 // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
4243 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4244 unsigned Num = C.getLimitedValue();
4245 APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
4246 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
4247 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4248 }
4249 break;
4250 }
4251 case Intrinsic::cttz: {
4252 // Limit to one use to ensure we don't increase instruction count.
4253 if (!II->hasOneUse())
4254 return nullptr;
4255
4256 // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
4257 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4258 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
4259 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
4260 Builder.CreateAnd(II->getArgOperand(0), Mask),
4261 ConstantInt::getNullValue(Ty));
4262 }
4263
4264 // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
4265 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4266 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
4267 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
4268 Builder.CreateAnd(II->getArgOperand(0), Mask),
4269 ConstantInt::getNullValue(Ty));
4270 }
4271 break;
4272 }
4273 case Intrinsic::ssub_sat:
4274 // ssub.sat(a, b) spred 0 -> a spred b
4275 //
4276 // Note this doesn't work for ssub.sat.i1 because ssub.sat.i1 0, -1 = 0
4277 // (because 1 saturates to 0). Just skip the optimization for i1.
4278 if (ICmpInst::isSigned(Pred) && C.getBitWidth() > 1) {
4279 if (C.isZero())
4280 return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
4281 // X s<= 0 is canonicalized to X s< 1
4282 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
4283 return new ICmpInst(ICmpInst::ICMP_SLE, II->getArgOperand(0),
4284 II->getArgOperand(1));
4285 // X s>= 0 is canonicalized to X s> -1
4286 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
4287 return new ICmpInst(ICmpInst::ICMP_SGE, II->getArgOperand(0),
4288 II->getArgOperand(1));
4289 }
4290 break;
4291 default:
4292 break;
4293 }
4294
4295 return nullptr;
4296}
4297
4298/// Handle icmp with constant (but not simple integer constant) RHS.
4299Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
4300 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4301 Constant *RHSC = dyn_cast<Constant>(Op1);
4302 Instruction *LHSI = dyn_cast<Instruction>(Op0);
4303 if (!RHSC || !LHSI)
4304 return nullptr;
4305
4306 switch (LHSI->getOpcode()) {
4307 case Instruction::IntToPtr:
4308 // icmp pred inttoptr(X), null -> icmp pred X, 0
4309 if (RHSC->isNullValue() &&
4310 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
4311 return new ICmpInst(
4312 I.getPredicate(), LHSI->getOperand(0),
4313 Constant::getNullValue(LHSI->getOperand(0)->getType()));
4314 break;
4315
4316 case Instruction::Load:
4317 // Try to optimize things like "A[i] > 4" to index computations.
4318 if (GetElementPtrInst *GEP =
4319 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
4320 if (Instruction *Res =
4321 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, I))
4322 return Res;
4323 break;
4324 }
4325
4326 return nullptr;
4327}
4328
4329Instruction *InstCombinerImpl::foldSelectICmp(CmpPredicate Pred, SelectInst *SI,
4330 Value *RHS, const ICmpInst &I) {
4331 // Try to fold the comparison into the select arms, which will cause the
4332 // select to be converted into a logical and/or.
4333 auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
4334 if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
4335 return Res;
4336 if (std::optional<bool> Impl = isImpliedCondition(
4337 SI->getCondition(), Pred, Op, RHS, DL, SelectCondIsTrue))
4338 return ConstantInt::get(I.getType(), *Impl);
4339 return nullptr;
4340 };
4341
4342 ConstantInt *CI = nullptr;
4343 Value *Op1 = SimplifyOp(SI->getOperand(1), true);
4344 if (Op1)
4345 CI = dyn_cast<ConstantInt>(Op1);
4346
4347 Value *Op2 = SimplifyOp(SI->getOperand(2), false);
4348 if (Op2)
4349 CI = dyn_cast<ConstantInt>(Op2);
4350
4351 auto Simplifies = [&](Value *Op, unsigned Idx) {
4352 // A comparison of ucmp/scmp with a constant will fold into an icmp.
4353 const APInt *Dummy;
4354 return Op ||
4355 (isa<CmpIntrinsic>(SI->getOperand(Idx)) &&
4356 SI->getOperand(Idx)->hasOneUse() && match(RHS, m_APInt(Dummy)));
4357 };
4358
4359 // We only want to perform this transformation if it will not lead to
4360 // additional code. This is true if either both sides of the select
4361 // fold to a constant (in which case the icmp is replaced with a select
4362 // which will usually simplify) or this is the only user of the
4363 // select (in which case we are trading a select+icmp for a simpler
4364 // select+icmp) or all uses of the select can be replaced based on
4365 // dominance information ("Global cases").
4366 bool Transform = false;
4367 if (Op1 && Op2)
4368 Transform = true;
4369 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4370 // Local case
4371 if (SI->hasOneUse())
4372 Transform = true;
4373 // Global cases
4374 else if (CI && !CI->isZero())
4375 // When Op1 is constant try replacing select with second operand.
4376 // Otherwise Op2 is constant and try replacing select with first
4377 // operand.
4378 Transform = replacedSelectWithOperand(SI, &I, Op1 ? 2 : 1);
4379 }
4380 if (Transform) {
4381 if (!Op1)
4382 Op1 = Builder.CreateICmp(Pred, SI->getOperand(1), RHS, I.getName());
4383 if (!Op2)
4384 Op2 = Builder.CreateICmp(Pred, SI->getOperand(2), RHS, I.getName());
4385 return SelectInst::Create(SI->getOperand(0), Op1, Op2);
4386 }
4387
4388 return nullptr;
4389}
4390
4391// Returns whether V is a Mask ((X + 1) & X == 0) or ~Mask (-Pow2OrZero)
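// Illustrative examples (i8): 0b00000111 is a Mask, and 0b11111000 (i.e.
// -8, a negated power of two) is a ~Mask.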
4392static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q,
4393 unsigned Depth = 0) {
4394 if (Not ? match(V, m_NegatedPower2OrZero()) : match(V, m_LowBitMaskOrZero()))
4395 return true;
4396 if (V->getType()->getScalarSizeInBits() == 1)
4397 return true;
4398 if (Depth++ >= MaxAnalysisRecursionDepth)
4399 return false;
4400 Value *X;
4401 const auto *I = dyn_cast<Instruction>(V);
4402 if (!I)
4403 return false;
4404 switch (I->getOpcode()) {
4405 case Instruction::ZExt:
4406 // ZExt(Mask) is a Mask.
4407 return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4408 case Instruction::SExt:
4409 // SExt(Mask) is a Mask.
4410 // SExt(~Mask) is a ~Mask.
4411 return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4412 case Instruction::And:
4413 case Instruction::Or:
4414 // Mask0 | Mask1 is a Mask.
4415 // Mask0 & Mask1 is a Mask.
4416 // ~Mask0 | ~Mask1 is a ~Mask.
4417 // ~Mask0 & ~Mask1 is a ~Mask.
4418 return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4419 isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4420 case Instruction::Xor:
4421 if (match(V, m_Not(m_Value(X))))
4422 return isMaskOrZero(X, !Not, Q, Depth);
4423
4424 // (X ^ -X) is a ~Mask
4425 if (Not)
4426 return match(V, m_c_Xor(m_Value(X), m_Neg(m_Deferred(X))));
4427 // (X ^ (X - 1)) is a Mask
4428 else
4429 return match(V, m_c_Xor(m_Value(X), m_Add(m_Deferred(X), m_AllOnes())));
4430 case Instruction::Select:
4431 // c ? Mask0 : Mask1 is a Mask.
4432 return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4433 isMaskOrZero(I->getOperand(2), Not, Q, Depth);
4434 case Instruction::Shl:
4435 // (~Mask) << X is a ~Mask.
4436 return Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4437 case Instruction::LShr:
4438 // Mask >> X is a Mask.
4439 return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4440 case Instruction::AShr:
4441 // Mask s>> X is a Mask.
4442 // ~Mask s>> X is a ~Mask.
4443 return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4444 case Instruction::Add:
4445 // Pow2 - 1 is a Mask.
4446 if (!Not && match(I->getOperand(1), m_AllOnes()))
4447 return isKnownToBeAPowerOfTwo(I->getOperand(0), Q.DL, /*OrZero*/ true,
4448 Q.AC, Q.CxtI, Q.DT, Depth);
4449 break;
4450 case Instruction::Sub:
4451 // -Pow2 is a ~Mask.
4452 if (Not && match(I->getOperand(0), m_Zero()))
4453 return isKnownToBeAPowerOfTwo(I->getOperand(1), Q.DL, /*OrZero*/ true,
4454 Q.AC, Q.CxtI, Q.DT, Depth);
4455 break;
4456 case Instruction::Call: {
4457 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
4458 switch (II->getIntrinsicID()) {
4459 // min/max(Mask0, Mask1) is a Mask.
4460 // min/max(~Mask0, ~Mask1) is a ~Mask.
4461 case Intrinsic::umax:
4462 case Intrinsic::smax:
4463 case Intrinsic::umin:
4464 case Intrinsic::smin:
4465 return isMaskOrZero(II->getArgOperand(1), Not, Q, Depth) &&
4466 isMaskOrZero(II->getArgOperand(0), Not, Q, Depth);
4467
4468 // In the context of masks, bitreverse(Mask) == ~Mask
4469 case Intrinsic::bitreverse:
4470 return isMaskOrZero(II->getArgOperand(0), !Not, Q, Depth);
4471 default:
4472 break;
4473 }
4474 }
4475 break;
4476 }
4477 default:
4478 break;
4479 }
4480 return false;
4481}
4482
4483/// Some comparisons can be simplified.
4484/// In this case, we are looking for comparisons that look like
4485/// a check for a lossy truncation.
4486/// Folds:
4487/// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask
4488/// icmp SrcPred (x & ~Mask), ~Mask to icmp DstPred x, ~Mask
4489/// icmp eq/ne (x & ~Mask), 0 to icmp DstPred x, Mask
4490/// icmp eq/ne (~x | Mask), -1 to icmp DstPred x, Mask
4491/// Where Mask is some pattern that produces all-ones in low bits:
4492/// (-1 >> y)
4493/// ((-1 << y) >> y) <- non-canonical, has extra uses
4494/// ~(-1 << y)
4495/// ((1 << y) + (-1)) <- non-canonical, has extra uses
4496/// The Mask can be a constant, too.
4497/// For some predicates, the operands are commutative.
4498/// For others, x can only be on a specific side.
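/// Illustrative example: icmp eq (and %x, 7), %x --> icmp ule %x, 7.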
4499static Value *foldICmpWithLowBitMaskedVal(ICmpInst::Predicate Pred, Value *Op0,
4500 Value *Op1, const SimplifyQuery &Q,
4501 InstCombiner &IC) {
4502
4503 ICmpInst::Predicate DstPred;
4504 switch (Pred) {
4505 case ICmpInst::ICMP_EQ:
4506 // x & Mask == x
4507 // x & ~Mask == 0
4508 // ~x | Mask == -1
4509 // -> x u<= Mask
4510 // x & ~Mask == ~Mask
4511 // -> ~Mask u<= x
4512 DstPred = ICmpInst::ICMP_ULE;
4513 break;
4514 case ICmpInst::ICMP_NE:
4515 // x & Mask != x
4516 // x & ~Mask != 0
4517 // ~x | Mask != -1
4518 // -> x u> Mask
4519 // x & ~Mask != ~Mask
4520 // -> ~Mask u> x
4521 DstPred = ICmpInst::ICMP_UGT;
4522 break;
4523 case ICmpInst::ICMP_ULT:
4524 // x & Mask u< x
4525 // -> x u> Mask
4526 // x & ~Mask u< ~Mask
4527 // -> ~Mask u> x
4528 DstPred = ICmpInst::ICMP_UGT;
4529 break;
4530 case ICmpInst::ICMP_UGE:
4531 // x & Mask u>= x
4532 // -> x u<= Mask
4533 // x & ~Mask u>= ~Mask
4534 // -> ~Mask u<= x
4535 DstPred = ICmpInst::ICMP_ULE;
4536 break;
4537 case ICmpInst::ICMP_SLT:
4538 // x & Mask s< x [iff Mask s>= 0]
4539 // -> x s> Mask
4540 // x & ~Mask s< ~Mask [iff ~Mask != 0]
4541 // -> ~Mask s> x
4542 DstPred = ICmpInst::ICMP_SGT;
4543 break;
4544 case ICmpInst::ICMP_SGE:
4545 // x & Mask s>= x [iff Mask s>= 0]
4546 // -> x s<= Mask
4547 // x & ~Mask s>= ~Mask [iff ~Mask != 0]
4548 // -> ~Mask s<= x
4549 DstPred = ICmpInst::ICMP_SLE;
4550 break;
4551 default:
4552 // We don't support sgt,sle
4553 // ule/ugt are simplified to true/false respectively.
4554 return nullptr;
4555 }
4556
4557 Value *X, *M;
4558 // Put search code in lambda for early positive returns.
4559 auto IsLowBitMask = [&]() {
4560 if (match(Op0, m_c_And(m_Specific(Op1), m_Value(M)))) {
4561 X = Op1;
4562 // Look for: x & Mask pred x
4563 if (isMaskOrZero(M, /*Not=*/false, Q)) {
4564 return !ICmpInst::isSigned(Pred) ||
4565 (match(M, m_NonNegative()) || isKnownNonNegative(M, Q));
4566 }
4567
4568 // Look for: x & ~Mask pred ~Mask
4569 if (isMaskOrZero(X, /*Not=*/true, Q)) {
4570 return !ICmpInst::isSigned(Pred) || isKnownNonZero(X, Q);
4571 }
4572 return false;
4573 }
4574 if (ICmpInst::isEquality(Pred) && match(Op1, m_AllOnes()) &&
4575 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(M))))) {
4576
4577 auto Check = [&]() {
4578 // Look for: ~x | Mask == -1
4579 if (isMaskOrZero(M, /*Not=*/false, Q)) {
4580 if (Value *NotX =
4581 IC.getFreelyInverted(X, X->hasOneUse(), &IC.Builder)) {
4582 X = NotX;
4583 return true;
4584 }
4585 }
4586 return false;
4587 };
4588 if (Check())
4589 return true;
4590 std::swap(X, M);
4591 return Check();
4592 }
4593 if (ICmpInst::isEquality(Pred) && match(Op1, m_Zero()) &&
4594 match(Op0, m_OneUse(m_And(m_Value(X), m_Value(M))))) {
4595 auto Check = [&]() {
4596 // Look for: x & ~Mask == 0
4597 if (isMaskOrZero(M, /*Not=*/true, Q)) {
4598 if (Value *NotM =
4599 IC.getFreelyInverted(M, M->hasOneUse(), &IC.Builder)) {
4600 M = NotM;
4601 return true;
4602 }
4603 }
4604 return false;
4605 };
4606 if (Check())
4607 return true;
4608 std::swap(X, M);
4609 return Check();
4610 }
4611 return false;
4612 };
4613
4614 if (!IsLowBitMask())
4615 return nullptr;
4616
4617 return IC.Builder.CreateICmp(DstPred, X, M);
4618}
4619
4620/// Some comparisons can be simplified.
4621/// In this case, we are looking for comparisons that look like
4622/// a check for a lossy signed truncation.
4623/// Folds: (MaskedBits is a constant.)
4624/// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
4625/// Into:
4626/// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
4627/// Where KeptBits = bitwidth(%x) - MaskedBits
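/// Illustrative example (i8, MaskedBits = 4, KeptBits = 4):
///   ((%x << 4) a>> 4) == %x  -->  (add %x, 8) u< 16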
4628static Value *
4629foldICmpWithTruncSignExtendedVal(ICmpInst &I,
4630 InstCombiner::BuilderTy &Builder) {
4631 CmpPredicate SrcPred;
4632 Value *X;
4633 const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
4634 // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
4635 if (!match(&I, m_c_ICmp(SrcPred,
4636 m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
4637 m_APInt(C1))),
4638 m_Deferred(X))))
4639 return nullptr;
4640
4641 // Potential handling of non-splats: for each element:
4642 // * if both are undef, replace with constant 0.
4643 // Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
4644 // * if both are not undef, and are different, bailout.
4645 // * else, only one is undef, then pick the non-undef one.
4646
4647 // The shift amount must be equal.
4648 if (*C0 != *C1)
4649 return nullptr;
4650 const APInt &MaskedBits = *C0;
4651 assert(MaskedBits != 0 && "shift by zero should be folded away already.");
4652
4653 ICmpInst::Predicate DstPred;
4654 switch (SrcPred) {
4655 case ICmpInst::Predicate::ICMP_EQ:
4656 // ((%x << MaskedBits) a>> MaskedBits) == %x
4657 // =>
4658 // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
4659 DstPred = ICmpInst::Predicate::ICMP_ULT;
4660 break;
4661 case ICmpInst::Predicate::ICMP_NE:
4662 // ((%x << MaskedBits) a>> MaskedBits) != %x
4663 // =>
4664 // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
4665 DstPred = ICmpInst::Predicate::ICMP_UGE;
4666 break;
4667 // FIXME: are more folds possible?
4668 default:
4669 return nullptr;
4670 }
4671
4672 auto *XType = X->getType();
4673 const unsigned XBitWidth = XType->getScalarSizeInBits();
4674 const APInt BitWidth = APInt(XBitWidth, XBitWidth);
4675 assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
4676
4677 // KeptBits = bitwidth(%x) - MaskedBits
4678 const APInt KeptBits = BitWidth - MaskedBits;
4679 assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
4680 // ICmpCst = (1 << KeptBits)
4681 const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
4682 assert(ICmpCst.isPowerOf2());
4683 // AddCst = (1 << (KeptBits-1))
4684 const APInt AddCst = ICmpCst.lshr(1);
4685 assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
4686
4687 // T0 = add %x, AddCst
4688 Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
4689 // T1 = T0 DstPred ICmpCst
4690 Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
4691
4692 return T1;
4693}
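// Example for the fold above (illustrative IR): for i8 %x with
// MaskedBits = 4 (so KeptBits = 4),
//   %s = shl i8 %x, 4
//   %t = ashr i8 %s, 4
//   %r = icmp eq i8 %t, %x
// rewrites to
//   %a = add i8 %x, 8       ; 1 << (KeptBits - 1)
//   %r = icmp ult i8 %a, 16 ; 1 << KeptBits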
4694
4695// Given pattern:
4696// icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4697// we should move shifts to the same hand of 'and', i.e. rewrite as
4698// icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
4699// We are only interested in opposite logical shifts here.
4700// One of the shifts can be truncated.
4701// If we can, we want to end up creating 'lshr' shift.
4702static Value *
4703foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
4704 InstCombiner::BuilderTy &Builder) {
4705 if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
4706 !I.getOperand(0)->hasOneUse())
4707 return nullptr;
4708
4709 auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
4710
4711 // Look for an 'and' of two logical shifts, one of which may be truncated.
4712 // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
4713 Instruction *XShift, *MaybeTruncation, *YShift;
4714 if (!match(
4715 I.getOperand(0),
4716 m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
4717 m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
4718 m_AnyLogicalShift, m_Instruction(YShift))),
4719 m_Instruction(MaybeTruncation)))))
4720 return nullptr;
4721
4722 // We potentially looked past 'trunc', but only when matching YShift,
4723 // therefore YShift must have the widest type.
4724 Instruction *WidestShift = YShift;
4725 // Therefore XShift must have the narrowest type.
4726 // Or they both have identical types if there was no truncation.
4727 Instruction *NarrowestShift = XShift;
4728
4729 Type *WidestTy = WidestShift->getType();
4730 Type *NarrowestTy = NarrowestShift->getType();
4731 assert(NarrowestTy == I.getOperand(0)->getType() &&
4732 "We did not look past any shifts while matching XShift though.");
4733 bool HadTrunc = WidestTy != I.getOperand(0)->getType();
4734
4735 // If YShift is a 'lshr', swap the shifts around.
4736 if (match(YShift, m_LShr(m_Value(), m_Value())))
4737 std::swap(XShift, YShift);
4738
4739 // The shifts must be in opposite directions.
4740 auto XShiftOpcode = XShift->getOpcode();
4741 if (XShiftOpcode == YShift->getOpcode())
4742 return nullptr; // Do not care about same-direction shifts here.
4743
4744 Value *X, *XShAmt, *Y, *YShAmt;
4745 match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
4746 match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
4747
4748 // If one of the values being shifted is a constant, then we will end up with
4749 // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
4750 // however, we will need to ensure that we won't increase instruction count.
4751 if (!isa<Constant>(X) && !isa<Constant>(Y)) {
4752 // At least one of the hands of the 'and' should be one-use shift.
4753 if (!match(I.getOperand(0),
4754 m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
4755 return nullptr;
4756 if (HadTrunc) {
4757 // Due to the 'trunc', we will need to widen X. For that either the old
4758 // 'trunc' or the shift amt in the non-truncated shift should be one-use.
4759 if (!MaybeTruncation->hasOneUse() &&
4760 !NarrowestShift->getOperand(1)->hasOneUse())
4761 return nullptr;
4762 }
4763 }
4764
4765 // We have two shift amounts from two different shifts. The types of those
4766 // shift amounts may not match. If that's the case let's bailout now.
4767 if (XShAmt->getType() != YShAmt->getType())
4768 return nullptr;
4769
4770 // As input, we have the following pattern:
4771 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4772 // We want to rewrite that as:
4773 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
4774 // While we know that originally (Q+K) would not overflow
4775 // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
4776 // shift amounts, so it may now overflow in a smaller bitwidth.
4777 // To ensure that does not happen, we need to ensure that the total maximal
4778 // shift amount is still representable in that smaller bit width.
4779 unsigned MaximalPossibleTotalShiftAmount =
4780 (WidestTy->getScalarSizeInBits() - 1) +
4781 (NarrowestTy->getScalarSizeInBits() - 1);
4782 APInt MaximalRepresentableShiftAmount =
4783 APInt::getAllOnes(XShAmt->getType()->getScalarSizeInBits());
4784 if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
4785 return nullptr;
4786
4787 // Can we fold (XShAmt+YShAmt) ?
4788 auto *NewShAmt = dyn_cast_or_null<Constant>(
4789 simplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
4790 /*isNUW=*/false, SQ.getWithInstruction(&I)));
4791 if (!NewShAmt)
4792 return nullptr;
4793 if (NewShAmt->getType() != WidestTy) {
4794 NewShAmt =
4795 ConstantFoldCastOperand(Instruction::ZExt, NewShAmt, WidestTy, SQ.DL);
4796 if (!NewShAmt)
4797 return nullptr;
4798 }
4799 unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
4800
4801 // Is the new shift amount smaller than the bit width?
4802 // FIXME: could also rely on ConstantRange.
4803 if (!match(NewShAmt,
4804 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
4805 APInt(WidestBitWidth, WidestBitWidth))))
4806 return nullptr;
4807
4808 // An extra legality check is needed if we had trunc-of-lshr.
4809 if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
4810 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4811 WidestShift]() {
4812 // It isn't obvious whether it's worth it to analyze non-constants here.
4813 // Also, let's basically give up on non-splat cases, pessimizing vectors.
4814 // If *any* of these preconditions matches we can perform the fold.
4815 Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
4816 ? NewShAmt->getSplatValue()
4817 : NewShAmt;
4818 // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
4819 if (NewShAmtSplat &&
4820 (NewShAmtSplat->isNullValue() ||
4821 NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
4822 return true;
4823 // We consider *min* leading zeros so a single outlier
4824 // blocks the transform as opposed to allowing it.
4825 if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
4826 KnownBits Known = computeKnownBits(C, SQ.DL);
4827 unsigned MinLeadZero = Known.countMinLeadingZeros();
4828 // If the value being shifted has at most lowest bit set we can fold.
4829 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4830 if (MaxActiveBits <= 1)
4831 return true;
4832 // Precondition: NewShAmt u<= countLeadingZeros(C)
4833 if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
4834 return true;
4835 }
4836 if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
4837 KnownBits Known = computeKnownBits(C, SQ.DL);
4838 unsigned MinLeadZero = Known.countMinLeadingZeros();
4839 // If the value being shifted has at most lowest bit set we can fold.
4840 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4841 if (MaxActiveBits <= 1)
4842 return true;
4843 // Precondition: ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
4844 if (NewShAmtSplat) {
4845 APInt AdjNewShAmt =
4846 (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
4847 if (AdjNewShAmt.ule(MinLeadZero))
4848 return true;
4849 }
4850 }
4851 return false; // Can't tell if it's ok.
4852 };
4853 if (!CanFold())
4854 return nullptr;
4855 }
4856
4857 // All good, we can do this fold.
4858 X = Builder.CreateZExt(X, WidestTy);
4859 Y = Builder.CreateZExt(Y, WidestTy);
4860 // The new shift uses the same opcode as the original shift of X.
4861 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4862 ? Builder.CreateLShr(X, NewShAmt)
4863 : Builder.CreateShl(X, NewShAmt);
4864 Value *T1 = Builder.CreateAnd(T0, Y);
4865 return Builder.CreateICmp(I.getPredicate(), T1,
4866 Constant::getNullValue(WidestTy));
4867}
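// Example for the fold above (illustrative IR): with Q = 1, K = 2, and
// Q + K = 3 u< 8,
//   %l = lshr i8 %x, 1
//   %s = shl i8 %y, 2
//   %a = and i8 %l, %s
//   %r = icmp eq i8 %a, 0
// rewrites (preferring the 'lshr') to
//   %l2 = lshr i8 %x, 3
//   %a2 = and i8 %l2, %y
//   %r  = icmp eq i8 %a2, 0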
4868
4869/// Fold
4870/// (-1 u/ x) u< y
4871/// ((x * y) ?/ x) != y
4872/// to
4873/// @llvm.?mul.with.overflow(x, y) plus extraction of overflow bit
4874/// Note that the comparison is commutative, while an inverted (u>=, ==)
4875/// predicate means that we are looking for the opposite answer.
4876Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
4877 CmpPredicate Pred;
4878 Value *X, *Y;
4879 Instruction *Mul;
4880 Instruction *Div;
4881 bool NeedNegation;
4882 // Look for: (-1 u/ x) u</u>= y
4883 if (!I.isEquality() &&
4884 match(&I, m_c_ICmp(Pred,
4885 m_CombineAnd(m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
4886 m_Instruction(Div)),
4887 m_Value(Y)))) {
4888 Mul = nullptr;
4889
4890 // Are we checking that overflow does not happen, or does happen?
4891 switch (Pred) {
4892 case ICmpInst::Predicate::ICMP_ULT:
4893 NeedNegation = false;
4894 break; // OK
4895 case ICmpInst::Predicate::ICMP_UGE:
4896 NeedNegation = true;
4897 break; // OK
4898 default:
4899 return nullptr; // Wrong predicate.
4900 }
4901 } else // Look for: ((x * y) / x) !=/== y
4902 if (I.isEquality() &&
4903 match(&I, m_c_ICmp(Pred, m_Value(Y),
4904 m_CombineAnd(
4905 m_OneUse(m_IDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
4906 m_Value(X)),
4907 m_Instruction(Mul)),
4908 m_Deferred(X))),
4909 m_Instruction(Div))))) {
4910 NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
4911 } else
4912 return nullptr;
4913
4914 BuilderTy::InsertPointGuard Guard(Builder);
4915 // If the pattern included (x * y), we'll want to insert new instructions
4916 // right before that original multiplication so that we can replace it.
4917 bool MulHadOtherUses = Mul && !Mul->hasOneUse();
4918 if (MulHadOtherUses)
4919 Builder.SetInsertPoint(Mul);
4920
4921 CallInst *Call = Builder.CreateIntrinsic(
4922 Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4923 : Intrinsic::smul_with_overflow,
4924 X->getType(), {X, Y}, /*FMFSource=*/nullptr, "mul");
4925
4926 // If the multiplication was used elsewhere, to ensure that we don't leave
4927 // "duplicate" instructions, replace uses of that original multiplication
4928 // with the multiplication result from the with.overflow intrinsic.
4929 if (MulHadOtherUses)
4930 replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "mul.val"));
4931
4932 Value *Res = Builder.CreateExtractValue(Call, 1, "mul.ov");
4933 if (NeedNegation) // This technically increases instruction count.
4934 Res = Builder.CreateNot(Res, "mul.not.ov");
4935
4936 // If we replaced the mul, erase it. Do this after all uses of Builder,
4937 // as the mul is used as insertion point.
4938 if (MulHadOtherUses)
4939 eraseInstFromFunction(*Mul);
4940
4941 return Res;
4942}
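// Example for the fold above (illustrative IR): the unsigned-division form
//   %d = udiv i8 -1, %x
//   %r = icmp ult i8 %d, %y
// rewrites to
//   %m = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 %y)
//   %r = extractvalue { i8, i1 } %m, 1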
4943
4944static Instruction *foldICmpXNegX(ICmpInst &I,
4945 InstCombiner::BuilderTy &Builder) {
4946 CmpPredicate Pred;
4947 Value *X;
4948 if (match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X)))) {
4949
4950 if (ICmpInst::isSigned(Pred))
4951 Pred = ICmpInst::getSwappedPredicate(Pred);
4952 else if (ICmpInst::isUnsigned(Pred))
4953 Pred = ICmpInst::getSignedPredicate(Pred);
4954 // else for equality-comparisons just keep the predicate.
4955
4956 return ICmpInst::Create(Instruction::ICmp, Pred, X,
4957 Constant::getNullValue(X->getType()), I.getName());
4958 }
4959
4960 // A value is not equal to its negation unless that value is 0 or
4961 // MinSignedValue, ie: a != -a --> (a & MaxSignedVal) != 0
4962 if (match(&I, m_c_ICmp(Pred, m_OneUse(m_Neg(m_Value(X))), m_Deferred(X))) &&
4963 ICmpInst::isEquality(Pred)) {
4964 Type *Ty = X->getType();
4965 uint32_t BitWidth = Ty->getScalarSizeInBits();
4966 Constant *MaxSignedVal =
4967 ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
4968 Value *And = Builder.CreateAnd(X, MaxSignedVal);
4969 Constant *Zero = Constant::getNullValue(Ty);
4970 return CmpInst::Create(Instruction::ICmp, Pred, And, Zero);
4971 }
4972
4973 return nullptr;
4974}
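// Example for the folds above (illustrative IR):
//   %n = sub nsw i8 0, %x
//   %r = icmp slt i8 %n, %x
// rewrites to
//   %r = icmp sgt i8 %x, 0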
4975
4976static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q,
4977 InstCombinerImpl &IC) {
4978 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
4979 // Normalize the 'and' to be operand 0.
4980 CmpInst::Predicate Pred = I.getPredicate();
4981 if (match(Op1, m_c_And(m_Specific(Op0), m_Value()))) {
4982 std::swap(Op0, Op1);
4983 Pred = ICmpInst::getSwappedPredicate(Pred);
4984 }
4985
4986 if (!match(Op0, m_c_And(m_Specific(Op1), m_Value(A))))
4987 return nullptr;
4988
4989 // icmp (X & Y) u< X --> (X & Y) != X
4990 if (Pred == ICmpInst::ICMP_ULT)
4991 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4992
4993 // icmp (X & Y) u>= X --> (X & Y) == X
4994 if (Pred == ICmpInst::ICMP_UGE)
4995 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4996
4997 if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
4998 // icmp (X & Y) eq/ne Y --> (X | ~Y) eq/ne -1 if Y is freely invertible and
4999 // Y is non-constant. If Y is constant the `X & C == C` form is preferable
5000 // so don't do this fold.
5001 if (!match(Op1, m_ImmConstant()))
5002 if (auto *NotOp1 =
5003 IC.getFreelyInverted(Op1, !Op1->hasNUsesOrMore(3), &IC.Builder))
5004 return new ICmpInst(Pred, IC.Builder.CreateOr(A, NotOp1),
5005 Constant::getAllOnesValue(Op1->getType()));
5006 // icmp (X & Y) eq/ne Y --> (~X & Y) eq/ne 0 if X is freely invertible.
5007 if (auto *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5008 return new ICmpInst(Pred, IC.Builder.CreateAnd(Op1, NotA),
5009 Constant::getNullValue(Op1->getType()));
5010 }
5011
5012 if (!ICmpInst::isSigned(Pred))
5013 return nullptr;
5014
5015 KnownBits KnownY = IC.computeKnownBits(A, &I);
5016 // (X & NegY) spred X --> (X & NegY) upred X
5017 if (KnownY.isNegative())
5018 return new ICmpInst(ICmpInst::getUnsignedPredicate(Pred), Op0, Op1);
5019
5020 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGT)
5021 return nullptr;
5022
5023 if (KnownY.isNonNegative())
5024 // (X & PosY) s<= X --> X s>= 0
5025 // (X & PosY) s> X --> X s< 0
5026 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
5027 Constant::getNullValue(Op1->getType()));
5028
5029 if (isKnownNegative(Op1, IC.getSimplifyQuery().getWithInstruction(&I)))
5030 // (NegX & Y) s<= NegX --> Y s< 0
5031 // (NegX & Y) s> NegX --> Y s>= 0
5032 return new ICmpInst(ICmpInst::getStrictPredicate(Pred), A,
5033 Constant::getNullValue(A->getType()));
5034
5035 return nullptr;
5036}
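// Example for the folds above (illustrative IR):
//   %a = and i8 %x, %y
//   %r = icmp ult i8 %a, %x
// rewrites to
//   %r = icmp ne i8 %a, %x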
5037
5038static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q,
5039 InstCombinerImpl &IC) {
5040 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5041
5042 // Normalize the 'or' to be operand 0.
5043 CmpInst::Predicate Pred = I.getPredicate();
5044 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value(A)))) {
5045 std::swap(Op0, Op1);
5046 Pred = ICmpInst::getSwappedPredicate(Pred);
5047 } else if (!match(Op0, m_c_Or(m_Specific(Op1), m_Value(A)))) {
5048 return nullptr;
5049 }
5050
5051 // icmp (X | Y) u<= X --> (X | Y) == X
5052 if (Pred == ICmpInst::ICMP_ULE)
5053 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5054
5055 // icmp (X | Y) u> X --> (X | Y) != X
5056 if (Pred == ICmpInst::ICMP_UGT)
5057 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5058
5059 if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
5060 // icmp (X | Y) eq/ne Y --> (X & ~Y) eq/ne 0 if Y is freely invertible
5061 if (Value *NotOp1 = IC.getFreelyInverted(
5062 Op1, !isa<Constant>(Op1) && !Op1->hasNUsesOrMore(3), &IC.Builder))
5063 return new ICmpInst(Pred, IC.Builder.CreateAnd(A, NotOp1),
5064 Constant::getNullValue(Op1->getType()));
5065 // icmp (X | Y) eq/ne Y --> (~X | Y) eq/ne -1 if X is freely invertible.
5066 if (Value *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5067 return new ICmpInst(Pred, IC.Builder.CreateOr(Op1, NotA),
5068 Constant::getAllOnesValue(Op1->getType()));
5069 }
5070 return nullptr;
5071}
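// Example for the folds above (illustrative IR):
//   %o = or i8 %x, %y
//   %r = icmp ugt i8 %o, %x
// rewrites to
//   %r = icmp ne i8 %o, %x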
5072
5073static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q,
5074 InstCombinerImpl &IC) {
5075 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5076 // Normalize the 'xor' to be operand 0.
5077 CmpInst::Predicate Pred = I.getPredicate();
5078 if (match(Op1, m_c_Xor(m_Specific(Op0), m_Value()))) {
5079 std::swap(Op0, Op1);
5080 Pred = ICmpInst::getSwappedPredicate(Pred);
5081 }
5082 if (!match(Op0, m_c_Xor(m_Specific(Op1), m_Value(A))))
5083 return nullptr;
5084
5085 // icmp (X ^ Y_NonZero) u>= X --> icmp (X ^ Y_NonZero) u> X
5086 // icmp (X ^ Y_NonZero) u<= X --> icmp (X ^ Y_NonZero) u< X
5087 // icmp (X ^ Y_NonZero) s>= X --> icmp (X ^ Y_NonZero) s> X
5088 // icmp (X ^ Y_NonZero) s<= X --> icmp (X ^ Y_NonZero) s< X
5089 CmpInst::Predicate PredOut = CmpInst::getStrictPredicate(Pred);
5090 if (PredOut != Pred && isKnownNonZero(A, Q))
5091 return new ICmpInst(PredOut, Op0, Op1);
5092
5093 // These transforms work when A is negative.
5094 // X s< X^A, X s<= X^A, X u> X^A, X u>= X^A --> X s< 0
5095 // X s> X^A, X s>= X^A, X u< X^A, X u<= X^A --> X s>= 0
5096 if (match(A, m_Negative())) {
5097 CmpInst::Predicate NewPred;
5098 switch (ICmpInst::getStrictPredicate(Pred)) {
5099 default:
5100 return nullptr;
5101 case ICmpInst::ICMP_SLT:
5102 case ICmpInst::ICMP_UGT:
5103 NewPred = ICmpInst::ICMP_SLT;
5104 break;
5105 case ICmpInst::ICMP_SGT:
5106 case ICmpInst::ICMP_ULT:
5107 NewPred = ICmpInst::ICMP_SGE;
5108 break;
5109 }
5110 Constant *Const = Constant::getNullValue(Op0->getType());
5111 return new ICmpInst(NewPred, Op0, Const);
5112 }
5113
5114 return nullptr;
5115}
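// Example for the folds above (illustrative IR): when %y is known non-zero,
//   %a = xor i8 %x, %y
//   %r = icmp uge i8 %a, %x
// strengthens to
//   %r = icmp ugt i8 %a, %x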
5116
5117/// Return true if X is a multiple of C.
5118/// TODO: Handle non-power-of-2 factors.
5119static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q) {
5120 if (C.isOne())
5121 return true;
5122
5123 if (!C.isPowerOf2())
5124 return false;
5125
5126 return MaskedValueIsZero(X, C - 1, Q);
5127}
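// For instance, isMultipleOf(X, 4, Q) asks whether the two low bits of X are
// known zero, so a value such as (shl i8 %v, 2) is a multiple of 4.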
5128
5129/// Try to fold icmp (binop), X or icmp X, (binop).
5130/// TODO: A large part of this logic is duplicated in InstSimplify's
5131/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
5132/// duplication.
5133Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
5134 const SimplifyQuery &SQ) {
5135 const SimplifyQuery Q = SQ.getWithInstruction(&I);
5136 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5137
5138 // Special logic for binary operators.
5139 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
5140 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
5141 if (!BO0 && !BO1)
5142 return nullptr;
5143
5144 if (Instruction *NewICmp = foldICmpXNegX(I, Builder))
5145 return NewICmp;
5146
5147 const CmpInst::Predicate Pred = I.getPredicate();
5148 Value *X;
5149
5150 // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
5151 // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
5152 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
5153 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5154 return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
5155 // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
5156 if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
5157 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5158 return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
5159
5160 {
5161 // (Op1 + X) + C u</u>= Op1 --> ~C - X u</u>= Op1
5162 Constant *C;
5163 if (match(Op0, m_OneUse(m_Add(m_c_Add(m_Specific(Op1), m_Value(X)),
5164 m_ImmConstant(C)))) &&
5165 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
5166 Constant *C2 = ConstantExpr::getNot(C);
5167 return new ICmpInst(Pred, Builder.CreateSub(C2, X), Op1);
5168 }
5169 // Op0 u>/u<= (Op0 + X) + C --> Op0 u>/u<= ~C - X
5170 if (match(Op1, m_OneUse(m_Add(m_c_Add(m_Specific(Op0), m_Value(X)),
5171 m_ImmConstant(C)))) &&
5172 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) {
5173 Constant *C2 = ConstantExpr::getNot(C);
5174 return new ICmpInst(Pred, Op0, Builder.CreateSub(C2, X));
5175 }
5176 }
5177
5178 // (icmp eq/ne (and X, -P2), INT_MIN)
5179 // -> (icmp slt/sge X, INT_MIN + P2)
5180 if (ICmpInst::isEquality(Pred) && BO0 &&
5181 match(I.getOperand(1), m_SignMask()) &&
5182 match(BO0, m_And(m_Value(), m_NegatedPower2OrZero()))) {
5183 // Will Constant fold.
5184 Value *NewC = Builder.CreateSub(I.getOperand(1), BO0->getOperand(1));
5185 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SLT
5186 : ICmpInst::ICMP_SGE,
5187 BO0->getOperand(0), NewC);
5188 }
5189
5190 {
5191 // Similar to above: an unsigned overflow comparison may use offset + mask:
5192 // ((Op1 + C) & C) u< Op1 --> Op1 != 0
5193 // ((Op1 + C) & C) u>= Op1 --> Op1 == 0
5194 // Op0 u> ((Op0 + C) & C) --> Op0 != 0
5195 // Op0 u<= ((Op0 + C) & C) --> Op0 == 0
5196 BinaryOperator *BO;
5197 const APInt *C;
5198 if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
5199 match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5200 match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowPoison(*C)))) {
5201 CmpInst::Predicate NewPred =
5202 Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5203 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5204 return new ICmpInst(NewPred, Op1, Zero);
5205 }
5206
5207 if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5208 match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5209 match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowPoison(*C)))) {
5210 CmpInst::Predicate NewPred =
5211 Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5212 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5213 return new ICmpInst(NewPred, Op0, Zero);
5214 }
5215 }
5216
5217 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
5218 bool Op0HasNUW = false, Op1HasNUW = false;
5219 bool Op0HasNSW = false, Op1HasNSW = false;
5220 // Analyze the case when either Op0 or Op1 is an add instruction.
5221 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
5222 auto hasNoWrapProblem = [](const BinaryOperator &BO, CmpInst::Predicate Pred,
5223 bool &HasNSW, bool &HasNUW) -> bool {
5224 if (isa<OverflowingBinaryOperator>(BO)) {
5225 HasNUW = BO.hasNoUnsignedWrap();
5226 HasNSW = BO.hasNoSignedWrap();
5227 return ICmpInst::isEquality(Pred) ||
5228 (CmpInst::isUnsigned(Pred) && HasNUW) ||
5229 (CmpInst::isSigned(Pred) && HasNSW);
5230 } else if (BO.getOpcode() == Instruction::Or) {
5231 HasNUW = true;
5232 HasNSW = true;
5233 return true;
5234 } else {
5235 return false;
5236 }
5237 };
5238 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
5239
5240 if (BO0) {
5241 match(BO0, m_AddLike(m_Value(A), m_Value(B)));
5242 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5243 }
5244 if (BO1) {
5245 match(BO1, m_AddLike(m_Value(C), m_Value(D)));
5246 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5247 }
5248
5249 // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
5250 // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
5251 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
5252 return new ICmpInst(Pred, A == Op1 ? B : A,
5253 Constant::getNullValue(Op1->getType()));
5254
5255 // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
5256 // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
5257 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
5258 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
5259 C == Op0 ? D : C);
5260
5261 // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
5262 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
5263 NoOp1WrapProblem) {
5264 // Determine Y and Z in the form icmp (X+Y), (X+Z).
5265 Value *Y, *Z;
5266 if (A == C) {
5267 // C + B == C + D -> B == D
5268 Y = B;
5269 Z = D;
5270 } else if (A == D) {
5271 // D + B == C + D -> B == C
5272 Y = B;
5273 Z = C;
5274 } else if (B == C) {
5275 // A + C == C + D -> A == D
5276 Y = A;
5277 Z = D;
5278 } else {
5279 assert(B == D);
5280 // A + D == C + D -> A == C
5281 Y = A;
5282 Z = C;
5283 }
5284 return new ICmpInst(Pred, Y, Z);
5285 }
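  // Example for the fold above (illustrative IR): with 'nuw' adds and an
  // unsigned predicate,
  //   %l = add nuw i8 %a, %b
  //   %r = add nuw i8 %a, %d
  //   %c = icmp ult i8 %l, %r
  // rewrites to
  //   %c = icmp ult i8 %b, %d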
5286
5287 if (ICmpInst::isRelational(Pred)) {
5288 // Return true if both X and Y are divisible by Z/-Z.
5289 // TODO: Generalize to check if (X - Y) is divisible by Z/-Z.
5290 auto ShareCommonDivisor = [&Q](Value *X, Value *Y, Value *Z,
5291 bool IsNegative) -> bool {
5292 const APInt *OffsetC;
5293 if (!match(Z, m_APInt(OffsetC)))
5294 return false;
5295
5296 // Fast path for Z == 1/-1.
5297 if (IsNegative ? OffsetC->isAllOnes() : OffsetC->isOne())
5298 return true;
5299
5300 APInt C = *OffsetC;
5301 if (IsNegative)
5302 C.negate();
5303 // Note: -INT_MIN is also negative.
5304 if (!C.isStrictlyPositive())
5305 return false;
5306
5307 return isMultipleOf(X, C, Q) && isMultipleOf(Y, C, Q);
5308 };
5309
5310 // TODO: The subtraction-related identities shown below also hold, but
5311 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
5312 // wouldn't happen even if they were implemented.
5313 //
5314 // icmp ult (A - 1), Op1 -> icmp ule A, Op1
5315 // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
5316 // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
5317 // icmp ule Op0, (C - 1) -> icmp ult Op0, C
5318
5319 // icmp slt (A + -1), Op1 -> icmp sle A, Op1
5320 // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
5321 // icmp sle (A + 1), Op1 -> icmp slt A, Op1
5322 // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
5323 // icmp ule (A + 1), Op0 -> icmp ult A, Op1
5324 // icmp ugt (A + 1), Op0 -> icmp uge A, Op1
5325 if (A && NoOp0WrapProblem &&
5326 ShareCommonDivisor(A, Op1, B,
5327 ICmpInst::isLT(Pred) || ICmpInst::isGE(Pred)))
5328 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), A,
5329 Op1);
5330
5331 // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
5332 // icmp sle Op0, (C + -1) -> icmp slt Op0, C
5333 // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
5334 // icmp slt Op0, (C + 1) -> icmp sle Op0, C
5335 // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
5336 // icmp ult Op0, (C + 1) -> icmp ule Op0, C
5337 if (C && NoOp1WrapProblem &&
5338 ShareCommonDivisor(Op0, C, D,
5339 ICmpInst::isGT(Pred) || ICmpInst::isLE(Pred)))
5340 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), Op0,
5341 C);
5342 }
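  // Example for the folds above (illustrative IR): with B = -1,
  //   %a = add nsw i8 %x, -1
  //   %r = icmp slt i8 %a, %y
  // rewrites to
  //   %r = icmp sle i8 %x, %y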
5343
5344 // if C1 has greater magnitude than C2:
5345 // icmp (A + C1), (C + C2) -> icmp (A + C3), C
5346 // s.t. C3 = C1 - C2
5347 //
5348 // if C2 has greater magnitude than C1:
5349 // icmp (A + C1), (C + C2) -> icmp A, (C + C3)
5350 // s.t. C3 = C2 - C1
5351 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
5352 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) {
5353 const APInt *AP1, *AP2;
5354 // TODO: Support non-uniform vectors.
5355 // TODO: Allow poison passthrough if B or D's element is poison.
5356 if (match(B, m_APIntAllowPoison(AP1)) &&
5357 match(D, m_APIntAllowPoison(AP2)) &&
5358 AP1->isNegative() == AP2->isNegative()) {
5359 APInt AP1Abs = AP1->abs();
5360 APInt AP2Abs = AP2->abs();
5361 if (AP1Abs.uge(AP2Abs)) {
5362 APInt Diff = *AP1 - *AP2;
5363 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5364 Value *NewAdd = Builder.CreateAdd(
5365 A, C3, "", Op0HasNUW && Diff.ule(*AP1), Op0HasNSW);
5366 return new ICmpInst(Pred, NewAdd, C);
5367 } else {
5368 APInt Diff = *AP2 - *AP1;
5369 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5370 Value *NewAdd = Builder.CreateAdd(
5371 C, C3, "", Op1HasNUW && Diff.ule(*AP2), Op1HasNSW);
5372 return new ICmpInst(Pred, A, NewAdd);
5373 }
5374 }
5375 Constant *Cst1, *Cst2;
5376 if (match(B, m_ImmConstant(Cst1)) && match(D, m_ImmConstant(Cst2)) &&
5377 ICmpInst::isEquality(Pred)) {
5378 Constant *Diff = ConstantExpr::getSub(Cst2, Cst1);
5379 Value *NewAdd = Builder.CreateAdd(C, Diff);
5380 return new ICmpInst(Pred, A, NewAdd);
5381 }
5382 }
5383
5384 // Analyze the case when either Op0 or Op1 is a sub instruction.
5385 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
5386 A = nullptr;
5387 B = nullptr;
5388 C = nullptr;
5389 D = nullptr;
5390 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
5391 A = BO0->getOperand(0);
5392 B = BO0->getOperand(1);
5393 }
5394 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
5395 C = BO1->getOperand(0);
5396 D = BO1->getOperand(1);
5397 }
5398
5399 // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
5400 if (A == Op1 && NoOp0WrapProblem)
5401 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
5402 // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
5403 if (C == Op0 && NoOp1WrapProblem)
5404 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
5405
5406 // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
5407 // (A - B) u>/u<= A --> B u>/u<= A
5408 if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5409 return new ICmpInst(Pred, B, A);
5410 // C u</u>= (C - D) --> C u</u>= D
5411 if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5412 return new ICmpInst(Pred, C, D);
5413 // (A - B) u>=/u< A --> B u>/u<= A iff B != 0
5414 if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5415 isKnownNonZero(B, Q))
5416 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
5417 // C u<=/u> (C - D) --> C u</u>= D iff D != 0
5418 if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
5419 isKnownNonZero(D, Q))
5420 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
5421
5422 // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
5423 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
5424 return new ICmpInst(Pred, A, C);
5425
5426 // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
5427 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
5428 return new ICmpInst(Pred, D, B);
5429
5430 // icmp (0-X) < cst --> x > -cst
5431 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
5432 Value *X;
5433 if (match(BO0, m_Neg(m_Value(X))))
5434 if (Constant *RHSC = dyn_cast<Constant>(Op1))
5435 if (RHSC->isNotMinSignedValue())
5436 return new ICmpInst(I.getSwappedPredicate(), X,
5437 ConstantExpr::getNeg(RHSC));
5438 }
5439
5440 if (Instruction *R = foldICmpXorXX(I, Q, *this))
5441 return R;
5442 if (Instruction *R = foldICmpOrXX(I, Q, *this))
5443 return R;
5444
5445 {
5446 // Try to remove shared multiplier from comparison:
5447 // X * Z pred Y * Z
5448 Value *X, *Y, *Z;
5449 if ((match(Op0, m_Mul(m_Value(X), m_Value(Z))) &&
5450 match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y)))) ||
5451 (match(Op0, m_Mul(m_Value(Z), m_Value(X))) &&
5452 match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y))))) {
5453 if (ICmpInst::isSigned(Pred)) {
5454 if (Op0HasNSW && Op1HasNSW) {
5455 KnownBits ZKnown = computeKnownBits(Z, &I);
5456 if (ZKnown.isStrictlyPositive())
5457 return new ICmpInst(Pred, X, Y);
5458 if (ZKnown.isNegative())
5459 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), X, Y);
5460 Value *LessThan = simplifyICmpInst(ICmpInst::ICMP_SLT, X, Y,
5461 SQ.getWithInstruction(&I));
5462 if (LessThan && match(LessThan, m_One()))
5463 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Z,
5464 Constant::getNullValue(Z->getType()));
5465 Value *GreaterThan = simplifyICmpInst(ICmpInst::ICMP_SGT, X, Y,
5466 SQ.getWithInstruction(&I));
5467 if (GreaterThan && match(GreaterThan, m_One()))
5468 return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5469 }
5470 } else {
5471 bool NonZero;
5472 if (ICmpInst::isEquality(Pred)) {
5473 // If X != Y, fold (X *nw Z) eq/ne (Y *nw Z) -> Z eq/ne 0
5474 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5475 isKnownNonEqual(X, Y, SQ))
5476 return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5477
5478 KnownBits ZKnown = computeKnownBits(Z, &I);
5479 // if Z % 2 != 0
5480 // X * Z eq/ne Y * Z -> X eq/ne Y
5481 if (ZKnown.countMaxTrailingZeros() == 0)
5482 return new ICmpInst(Pred, X, Y);
5483 NonZero = !ZKnown.One.isZero() || isKnownNonZero(Z, Q);
5484 // if Z != 0 and nsw(X * Z) and nsw(Y * Z)
5485 // X * Z eq/ne Y * Z -> X eq/ne Y
5486 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5487 return new ICmpInst(Pred, X, Y);
5488 } else
5489 NonZero = isKnownNonZero(Z, Q);
5490
5491 // If Z != 0 and nuw(X * Z) and nuw(Y * Z)
5492 // X * Z u{lt/le/gt/ge}/eq/ne Y * Z -> X u{lt/le/gt/ge}/eq/ne Y
5493 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5494 return new ICmpInst(Pred, X, Y);
5495 }
5496 }
5497 }
5498
5499 BinaryOperator *SRem = nullptr;
5500 // icmp (srem X, Y), Y
5501 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
5502 SRem = BO0;
5503 // icmp Y, (srem X, Y)
5504 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
5505 Op0 == BO1->getOperand(1))
5506 SRem = BO1;
5507 if (SRem) {
5508 // We don't check hasOneUse to avoid increasing register pressure because
5509 // the value we use is the same value this instruction was already using.
5510 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
5511 default:
5512 break;
5513 case ICmpInst::ICMP_EQ:
5514 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5515 case ICmpInst::ICMP_NE:
5516 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5517 case ICmpInst::ICMP_SGT:
5518 case ICmpInst::ICMP_SGE:
5519 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
5520 Constant::getAllOnesValue(SRem->getType()));
5521 case ICmpInst::ICMP_SLT:
5522 case ICmpInst::ICMP_SLE:
5523 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
5524 Constant::getNullValue(SRem->getType()));
5525 }
5526 }
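  // Example for the srem fold above (illustrative IR):
  //   %rem = srem i8 %x, %y
  //   %r = icmp slt i8 %rem, %y
  // rewrites to
  //   %r = icmp sgt i8 %y, -1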
5527
5528 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
5529 (BO0->hasOneUse() || BO1->hasOneUse()) &&
5530 BO0->getOperand(1) == BO1->getOperand(1)) {
5531 switch (BO0->getOpcode()) {
5532 default:
5533 break;
5534 case Instruction::Add:
5535 case Instruction::Sub:
5536 case Instruction::Xor: {
5537 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
5538 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5539
5540 const APInt *C;
5541 if (match(BO0->getOperand(1), m_APInt(C))) {
5542 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
5543 if (C->isSignMask()) {
5544 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5545 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5546 }
5547
5548 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
5549 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
5550 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5551 NewPred = I.getSwappedPredicate(NewPred);
5552 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5553 }
5554 }
5555 break;
5556 }
5557 case Instruction::Mul: {
5558 if (!I.isEquality())
5559 break;
5560
5561 const APInt *C;
5562 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isZero() &&
5563 !C->isOne()) {
5564 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
5565 // Mask = -1 >> count-trailing-zeros(C).
5566 if (unsigned TZs = C->countr_zero()) {
5567 Constant *Mask = ConstantInt::get(
5568 BO0->getType(),
5569 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
5570 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
5571 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
5572 return new ICmpInst(Pred, And1, And2);
5573 }
5574 }
5575 break;
5576 }
5577 case Instruction::UDiv:
5578 case Instruction::LShr:
5579 if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
5580 break;
5581 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5582
5583 case Instruction::SDiv:
5584 if (!(I.isEquality() || match(BO0->getOperand(1), m_NonNegative())) ||
5585 !BO0->isExact() || !BO1->isExact())
5586 break;
5587 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5588
5589 case Instruction::AShr:
5590 if (!BO0->isExact() || !BO1->isExact())
5591 break;
5592 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5593
5594 case Instruction::Shl: {
5595 bool NUW = Op0HasNUW && Op1HasNUW;
5596 bool NSW = Op0HasNSW && Op1HasNSW;
5597 if (!NUW && !NSW)
5598 break;
5599 if (!NSW && I.isSigned())
5600 break;
5601 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5602 }
5603 }
5604 }
5605
5606 if (BO0) {
5607 // Transform A & (L - 1) `ult` L --> L != 0
5608 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
5609 auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
5610
5611 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
5612 auto *Zero = Constant::getNullValue(BO0->getType());
5613 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
5614 }
5615 }
5616
5617 // For unsigned predicates / eq / ne:
5618 // icmp pred (x << 1), x --> icmp getSignedPredicate(pred) x, 0
5619 // icmp pred x, (x << 1) --> icmp getSignedPredicate(pred) 0, x
5620 if (!ICmpInst::isSigned(Pred)) {
5621 if (match(Op0, m_Shl(m_Specific(Op1), m_One())))
5622 return new ICmpInst(ICmpInst::getSignedPredicate(Pred), Op1,
5623 Constant::getNullValue(Op1->getType()));
5624 else if (match(Op1, m_Shl(m_Specific(Op0), m_One())))
5625 return new ICmpInst(ICmpInst::getSignedPredicate(Pred),
5626 Constant::getNullValue(Op0->getType()), Op0);
5627 }
5628
5629 if (Value *V = foldMultiplicationOverflowCheck(I))
5630 return replaceInstUsesWith(I, V);
5631
5632 if (Instruction *R = foldICmpAndXX(I, Q, *this))
5633 return R;
5634
5635 if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
5636 return replaceInstUsesWith(I, V);
5637
5638 if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
5639 return replaceInstUsesWith(I, V);
5640
5641 return nullptr;
5642}
5643
5644/// Fold icmp Pred min|max(X, Y), Z.
5645Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I,
5646 MinMaxIntrinsic *MinMax,
5647 Value *Z, CmpPredicate Pred) {
5648 Value *X = MinMax->getLHS();
5649 Value *Y = MinMax->getRHS();
5650 if (ICmpInst::isSigned(Pred) && !MinMax->isSigned())
5651 return nullptr;
5652 if (ICmpInst::isUnsigned(Pred) && MinMax->isSigned()) {
5653 // Revert the transform signed pred -> unsigned pred
5654 // TODO: We can flip the signedness of predicate if both operands of icmp
5655 // are negative.
5656 if (isKnownNonNegative(Z, SQ.getWithInstruction(&I)) &&
5657 isKnownNonNegative(MinMax, SQ.getWithInstruction(&I))) {
5658 Pred = ICmpInst::getSignedPredicate(Pred);
5659 } else
5660 return nullptr;
5661 }
5662 SimplifyQuery Q = SQ.getWithInstruction(&I);
5663 auto IsCondKnownTrue = [](Value *Val) -> std::optional<bool> {
5664 if (!Val)
5665 return std::nullopt;
5666 if (match(Val, m_One()))
5667 return true;
5668 if (match(Val, m_Zero()))
5669 return false;
5670 return std::nullopt;
5671 };
5672 // Remove samesign here since it is illegal to keep it when we speculatively
5673 // execute comparisons. For example, `icmp samesign ult umax(X, -46), -32`
5674 // cannot be decomposed into `(icmp samesign ult X, -46) or (icmp samesign ult
5675 // -46, -32)`. `X` is allowed to be non-negative here.
5676 Pred = Pred.dropSameSign();
5677 auto CmpXZ = IsCondKnownTrue(simplifyICmpInst(Pred, X, Z, Q));
5678 auto CmpYZ = IsCondKnownTrue(simplifyICmpInst(Pred, Y, Z, Q));
5679 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5680 return nullptr;
5681 if (!CmpXZ.has_value()) {
5682 std::swap(X, Y);
5683 std::swap(CmpXZ, CmpYZ);
5684 }
5685
5686 auto FoldIntoCmpYZ = [&]() -> Instruction * {
5687 if (CmpYZ.has_value())
5688 return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *CmpYZ));
5689 return ICmpInst::Create(Instruction::ICmp, Pred, Y, Z);
5690 };
5691
5692 switch (Pred) {
5693 case ICmpInst::ICMP_EQ:
5694 case ICmpInst::ICMP_NE: {
5695 // If X == Z:
5696 // Expr Result
5697 // min(X, Y) == Z X <= Y
5698 // max(X, Y) == Z X >= Y
5699 // min(X, Y) != Z X > Y
5700 // max(X, Y) != Z X < Y
5701 if ((Pred == ICmpInst::ICMP_EQ) == *CmpXZ) {
5702 ICmpInst::Predicate NewPred =
5703 ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
5704 if (Pred == ICmpInst::ICMP_NE)
5705 NewPred = ICmpInst::getInversePredicate(NewPred);
5706 return ICmpInst::Create(Instruction::ICmp, NewPred, X, Y);
5707 }
5708 // Otherwise (X != Z):
5709 ICmpInst::Predicate NewPred = MinMax->getPredicate();
5710 auto MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5711 if (!MinMaxCmpXZ.has_value()) {
5712 std::swap(X, Y);
5713 std::swap(CmpXZ, CmpYZ);
5714 // Re-check pre-condition X != Z
5715 if (!CmpXZ.has_value() || (Pred == ICmpInst::ICMP_EQ) == *CmpXZ)
5716 break;
5717 MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5718 }
5719 if (!MinMaxCmpXZ.has_value())
5720 break;
5721 if (*MinMaxCmpXZ) {
5722 // Expr Fact Result
5723 // min(X, Y) == Z X < Z false
5724 // max(X, Y) == Z X > Z false
5725 // min(X, Y) != Z X < Z true
5726 // max(X, Y) != Z X > Z true
5727 return replaceInstUsesWith(
5728 I, ConstantInt::getBool(I.getType(), Pred == ICmpInst::ICMP_NE));
5729 } else {
5730 // Expr Fact Result
5731 // min(X, Y) == Z X > Z Y == Z
5732 // max(X, Y) == Z X < Z Y == Z
5733 // min(X, Y) != Z X > Z Y != Z
5734 // max(X, Y) != Z X < Z Y != Z
5735 return FoldIntoCmpYZ();
5736 }
5737 break;
5738 }
5739 case ICmpInst::ICMP_SLT:
5740 case ICmpInst::ICMP_ULT:
5741 case ICmpInst::ICMP_SLE:
5742 case ICmpInst::ICMP_ULE:
5743 case ICmpInst::ICMP_SGT:
5744 case ICmpInst::ICMP_UGT:
5745 case ICmpInst::ICMP_SGE:
5746 case ICmpInst::ICMP_UGE: {
5747 bool IsSame = MinMax->getPredicate() == ICmpInst::getStrictPredicate(Pred);
5748 if (*CmpXZ) {
5749 if (IsSame) {
5750 // Expr Fact Result
5751 // min(X, Y) < Z X < Z true
5752 // min(X, Y) <= Z X <= Z true
5753 // max(X, Y) > Z X > Z true
5754 // max(X, Y) >= Z X >= Z true
5755 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5756 } else {
5757 // Expr Fact Result
5758 // max(X, Y) < Z X < Z Y < Z
5759 // max(X, Y) <= Z X <= Z Y <= Z
5760 // min(X, Y) > Z X > Z Y > Z
5761 // min(X, Y) >= Z X >= Z Y >= Z
5762 return FoldIntoCmpYZ();
5763 }
5764 } else {
5765 if (IsSame) {
5766 // Expr Fact Result
5767 // min(X, Y) < Z X >= Z Y < Z
5768 // min(X, Y) <= Z X > Z Y <= Z
5769 // max(X, Y) > Z X <= Z Y > Z
5770 // max(X, Y) >= Z X < Z Y >= Z
5771 return FoldIntoCmpYZ();
5772 } else {
5773 // Expr Fact Result
5774 // max(X, Y) < Z X >= Z false
5775 // max(X, Y) <= Z X > Z false
5776 // min(X, Y) > Z X <= Z false
5777 // min(X, Y) >= Z X < Z false
5778 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5779 }
5780 }
5781 break;
5782 }
5783 default:
5784 break;
5785 }
5786
5787 return nullptr;
5788}
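// Example for the fold above (illustrative IR): when %x s< %z is known,
//   %m = call i8 @llvm.smin.i8(i8 %x, i8 %y)
//   %r = icmp slt i8 %m, %z
// folds to true; when %x s>= %z is known instead, it reduces to
//   %r = icmp slt i8 %y, %z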
5789
5790/// Match and fold patterns like:
5791/// icmp eq/ne X, min(max(X, Lo), Hi)
5792/// which represents a range check and can be expressed as a ConstantRange.
5793///
5794/// For icmp eq, build ConstantRange [Lo, Hi + 1) and convert to:
5795/// (X - Lo) u< (Hi + 1 - Lo)
5796/// For icmp ne, build ConstantRange [Hi + 1, Lo) and convert to:
5797/// (X - (Hi + 1)) u< (Lo - (Hi + 1))
5798Instruction *InstCombinerImpl::foldICmpWithClamp(ICmpInst &I, Value *X,
5799 MinMaxIntrinsic *Min) {
5800 if (!I.isEquality() || !Min->hasOneUse() || !Min->isMin())
5801 return nullptr;
5802
5803 const APInt *Lo = nullptr, *Hi = nullptr;
5804 if (Min->isSigned()) {
5805 if (!match(Min->getLHS(), m_OneUse(m_SMax(m_Specific(X), m_APInt(Lo)))) ||
5806 !match(Min->getRHS(), m_APInt(Hi)) || !Lo->slt(*Hi))
5807 return nullptr;
5808 } else {
5809 if (!match(Min->getLHS(), m_OneUse(m_UMax(m_Specific(X), m_APInt(Lo)))) ||
5810 !match(Min->getRHS(), m_APInt(Hi)) || !Lo->ult(*Hi))
5811 return nullptr;
5812 }
5813
5814 ConstantRange CR = ConstantRange::getNonEmpty(*Lo, *Hi + 1);
5815 ICmpInst::Predicate Pred;
5816 APInt C, Offset;
5817 if (I.getPredicate() == ICmpInst::ICMP_EQ)
5818 CR.getEquivalentICmp(Pred, C, Offset);
5819 else
5820 CR.inverse().getEquivalentICmp(Pred, C, Offset);
5821
5822 if (!Offset.isZero())
5823 X = Builder.CreateAdd(X, ConstantInt::get(X->getType(), Offset));
5824
5825 return replaceInstUsesWith(
5826 I, Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), C)));
5827}
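// Example for the fold above (illustrative IR): for the clamp to [10, 20],
//   %lo = call i8 @llvm.smax.i8(i8 %x, i8 10)
//   %c  = call i8 @llvm.smin.i8(i8 %lo, i8 20)
//   %r  = icmp eq i8 %x, %c
// rewrites to the range check
//   %a = add i8 %x, -10
//   %r = icmp ult i8 %a, 11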
5828
5829// Canonicalize checking for a power-of-2-or-zero value:
5830static Instruction *foldICmpPow2Test(ICmpInst &I,
5831 InstCombiner::BuilderTy &Builder) {
5832 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5833 const CmpInst::Predicate Pred = I.getPredicate();
5834 Value *A = nullptr;
5835 bool CheckIs;
5836 if (I.isEquality()) {
5837 // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
5838 // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
5839 if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
5840 m_Deferred(A)))) ||
5841 !match(Op1, m_ZeroInt()))
5842 A = nullptr;
5843
5844 // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
5845 // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
5846 if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
5847 A = Op1;
5848 else if (match(Op1,
5849 m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
5850 A = Op0;
5851
5852 CheckIs = Pred == ICmpInst::ICMP_EQ;
5853 } else if (ICmpInst::isUnsigned(Pred)) {
5854 // (A ^ (A-1)) u>= A --> ctpop(A) < 2 (two commuted variants)
5855 // ((A-1) ^ A) u< A --> ctpop(A) > 1 (two commuted variants)
5856
5857 if ((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5858 match(Op0, m_OneUse(m_c_Xor(m_Add(m_Specific(Op1), m_AllOnes()),
5859 m_Specific(Op1))))) {
5860 A = Op1;
5861 CheckIs = Pred == ICmpInst::ICMP_UGE;
5862 } else if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5863 match(Op1, m_OneUse(m_c_Xor(m_Add(m_Specific(Op0), m_AllOnes()),
5864 m_Specific(Op0))))) {
5865 A = Op0;
5866 CheckIs = Pred == ICmpInst::ICMP_ULE;
5867 }
5868 }
5869
5870 if (A) {
5871 Type *Ty = A->getType();
5872 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
5873 return CheckIs ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop,
5874 ConstantInt::get(Ty, 2))
5875 : new ICmpInst(ICmpInst::ICMP_UGT, CtPop,
5876 ConstantInt::get(Ty, 1));
5877 }
5878
5879 return nullptr;
5880}
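// Example for the folds above (illustrative IR):
//   %m = add i8 %a, -1
//   %t = and i8 %m, %a
//   %r = icmp eq i8 %t, 0
// rewrites to
//   %p = call i8 @llvm.ctpop.i8(i8 %a)
//   %r = icmp ult i8 %p, 2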
5881
5882/// Find all pairs (BinOp, RHS) such that 'BinOp V, RHS' can be simplified.
5883using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5884static void collectOffsetOp(Value *V, SmallVectorImpl<OffsetOp> &Offsets,
5885 bool AllowRecursion) {
5886 auto *Inst = dyn_cast<Instruction>(V);
5887 if (!Inst || !Inst->hasOneUse())
5888 return;
5889
5890 switch (Inst->getOpcode()) {
5891 case Instruction::Add:
5892 Offsets.emplace_back(Instruction::Sub, Inst->getOperand(1));
5893 Offsets.emplace_back(Instruction::Sub, Inst->getOperand(0));
5894 break;
5895 case Instruction::Sub:
5896 Offsets.emplace_back(Instruction::Add, Inst->getOperand(1));
5897 break;
5898 case Instruction::Xor:
5899 Offsets.emplace_back(Instruction::Xor, Inst->getOperand(1));
5900 Offsets.emplace_back(Instruction::Xor, Inst->getOperand(0));
5901 break;
5902 case Instruction::Shl:
5903 if (Inst->hasNoSignedWrap())
5904 Offsets.emplace_back(Instruction::AShr, Inst->getOperand(1));
5905 if (Inst->hasNoUnsignedWrap())
5906 Offsets.emplace_back(Instruction::LShr, Inst->getOperand(1));
5907 break;
5908 case Instruction::Select:
5909 if (AllowRecursion) {
5910 collectOffsetOp(Inst->getOperand(1), Offsets, /*AllowRecursion=*/false);
5911 collectOffsetOp(Inst->getOperand(2), Offsets, /*AllowRecursion=*/false);
5912 }
5913 break;
5914 default:
5915 break;
5916 }
5917}
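// For instance, for V = (add %a, %b) this records (Sub, %b) and (Sub, %a):
// subtracting either operand from V simplifies to the other operand.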
5918
5919enum class OffsetKind { Invalid, Value, Select };
5920
5921struct OffsetResult {
5922 OffsetKind Kind;
5923 Value *V0, *V1, *V2;
5924
5925 static OffsetResult invalid() {
5926 return {OffsetKind::Invalid, nullptr, nullptr, nullptr};
5927 }
5928 static OffsetResult value(Value *V) {
5929 return {OffsetKind::Value, V, nullptr, nullptr};
5930 }
5931 static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV) {
5932 return {OffsetKind::Select, Cond, TrueV, FalseV};
5933 }
5934 bool isValid() const { return Kind != OffsetKind::Invalid; }
5935 Value *materialize(InstCombiner::BuilderTy &Builder) const {
5936 switch (Kind) {
5937 case OffsetKind::Invalid:
5938 llvm_unreachable("Invalid offset result");
5939 case OffsetKind::Value:
5940 return V0;
5941 case OffsetKind::Select:
5942 return Builder.CreateSelect(V0, V1, V2);
5943 }
5944 llvm_unreachable("Unknown OffsetKind enum");
5945 }
5946};
5947
5948/// Offset both sides of an equality icmp to see if we can save some
5949/// instructions: icmp eq/ne X, Y -> icmp eq/ne X op Z, Y op Z.
5950/// Note: This operation should not introduce poison.
5951static Instruction *foldICmpEqualityWithOffset(ICmpInst &I,
5952 InstCombiner::BuilderTy &Builder,
5953 const SimplifyQuery &SQ) {
5954 assert(I.isEquality() && "Expected an equality icmp");
5955 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5956 if (!Op0->getType()->isIntOrIntVectorTy())
5957 return nullptr;
5958
5959 SmallVector<OffsetOp, 4> OffsetOps;
5960 collectOffsetOp(Op0, OffsetOps, /*AllowRecursion=*/true);
5961 collectOffsetOp(Op1, OffsetOps, /*AllowRecursion=*/true);
5962
5963 auto ApplyOffsetImpl = [&](Value *V, unsigned BinOpc, Value *RHS) -> Value * {
5964 switch (BinOpc) {
5965 // V = shl nsw X, RHS => X = ashr V, RHS
5966 case Instruction::AShr: {
5967 const APInt *CV, *CRHS;
5968 if (!(match(V, m_APInt(CV)) && match(RHS, m_APInt(CRHS)) &&
5969 CV->ashr(*CRHS).shl(*CRHS) == *CV) &&
5970 !match(V, m_NSWShl(m_Value(), m_Specific(RHS))))
5971 return nullptr;
5972 break;
5973 }
5974 // V = shl nuw X, RHS => X = lshr V, RHS
5975 case Instruction::LShr: {
5976 const APInt *CV, *CRHS;
5977 if (!(match(V, m_APInt(CV)) && match(RHS, m_APInt(CRHS)) &&
5978 CV->lshr(*CRHS).shl(*CRHS) == *CV) &&
5979 !match(V, m_NUWShl(m_Value(), m_Specific(RHS))))
5980 return nullptr;
5981 break;
5982 }
5983 default:
5984 break;
5985 }
5986
5987 Value *Simplified = simplifyBinOp(BinOpc, V, RHS, SQ);
5988 if (!Simplified)
5989 return nullptr;
5990 // Reject constant expressions as they don't simplify things.
5991 if (isa<Constant>(Simplified) && !match(Simplified, m_ImmConstant()))
5992 return nullptr;
5993 // Check if the transformation introduces poison.
5994 return impliesPoison(RHS, V) ? Simplified : nullptr;
5995 };
5996
5997 auto ApplyOffset = [&](Value *V, unsigned BinOpc,
5998 Value *RHS) -> OffsetResult {
5999 if (auto *Sel = dyn_cast<SelectInst>(V)) {
6000 if (!Sel->hasOneUse())
6001 return OffsetResult::invalid();
6002 Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc, RHS);
6003 if (!TrueVal)
6004 return OffsetResult::invalid();
6005 Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc, RHS);
6006 if (!FalseVal)
6007 return OffsetResult::invalid();
6008 return OffsetResult::select(Sel->getCondition(), TrueVal, FalseVal);
6009 }
6010 if (Value *Simplified = ApplyOffsetImpl(V, BinOpc, RHS))
6011 return OffsetResult::value(Simplified);
6012 return OffsetResult::invalid();
6013 };
6014
6015 for (auto [BinOp, RHS] : OffsetOps) {
6016 auto BinOpc = static_cast<unsigned>(BinOp);
6017
6018 auto Op0Result = ApplyOffset(Op0, BinOpc, RHS);
6019 if (!Op0Result.isValid())
6020 continue;
6021 auto Op1Result = ApplyOffset(Op1, BinOpc, RHS);
6022 if (!Op1Result.isValid())
6023 continue;
6024
6025 Value *NewLHS = Op0Result.materialize(Builder);
6026 Value *NewRHS = Op1Result.materialize(Builder);
6027 return new ICmpInst(I.getPredicate(), NewLHS, NewRHS);
6028 }
6029
6030 return nullptr;
6031}
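// Example for the fold above (illustrative IR): offsetting both sides with
// (sub ..., 5),
//   %l = add i8 %x, 5
//   %r = icmp eq i8 %l, 7
// rewrites to
//   %r = icmp eq i8 %x, 2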
6032
6033Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
6034 if (!I.isEquality())
6035 return nullptr;
6036
6037 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6038 const CmpInst::Predicate Pred = I.getPredicate();
6039 Value *A, *B, *C, *D;
6040 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
6041 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6042 Value *OtherVal = A == Op1 ? B : A;
6043 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6044 }
6045
6046 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
6047 // A^c1 == C^c2 --> A == C^(c1^c2)
6048 ConstantInt *C1, *C2;
6049 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
6050 Op1->hasOneUse()) {
6051 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
6052 Value *Xor = Builder.CreateXor(C, NC);
6053 return new ICmpInst(Pred, A, Xor);
6054 }
6055
6056 // A^B == A^D -> B == D
6057 if (A == C)
6058 return new ICmpInst(Pred, B, D);
6059 if (A == D)
6060 return new ICmpInst(Pred, B, C);
6061 if (B == C)
6062 return new ICmpInst(Pred, A, D);
6063 if (B == D)
6064 return new ICmpInst(Pred, A, C);
6065 }
6066 }
6067
6068 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
6069 // A == (A^B) -> B == 0
6070 Value *OtherVal = A == Op0 ? B : A;
6071 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6072 }
6073
6074 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6075 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
6076 match(Op1, m_And(m_Value(C), m_Value(D)))) {
6077 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
6078
6079 if (A == C) {
6080 X = B;
6081 Y = D;
6082 Z = A;
6083 } else if (A == D) {
6084 X = B;
6085 Y = C;
6086 Z = A;
6087 } else if (B == C) {
6088 X = A;
6089 Y = D;
6090 Z = B;
6091 } else if (B == D) {
6092 X = A;
6093 Y = C;
6094 Z = B;
6095 }
6096
6097 if (X) {
6098 // If X^Y is a negative power of two, then `icmp eq/ne (Z & NegP2), 0`
6099 // will fold to `icmp ult/uge Z, -NegP2`, incurring no additional
6100 // instructions.
6101 const APInt *C0, *C1;
6102 bool XorIsNegP2 = match(X, m_APInt(C0)) && match(Y, m_APInt(C1)) &&
6103 (*C0 ^ *C1).isNegatedPowerOf2();
6104
6105 // If both Op0 and Op1 are one-use, or if X^Y will constant-fold and one
6106 // of Op0/Op1 is one-use, proceed. In those cases we are instruction
6107 // neutral, but `icmp eq/ne A, 0` is easier to analyze than `icmp eq/ne A, B`.
6108 int UseCnt =
6109 int(Op0->hasOneUse()) + int(Op1->hasOneUse()) +
6110 (int(match(X, m_ImmConstant()) && match(Y, m_ImmConstant())));
6111 if (XorIsNegP2 || UseCnt >= 2) {
6112 // Build (X^Y) & Z
6113 Op1 = Builder.CreateXor(X, Y);
6114 Op1 = Builder.CreateAnd(Op1, Z);
6115 return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
6116 }
6117 }
6118 }
6119
6120 {
6121 // Similar to above, but specialized for constant because invert is needed:
6122 // (X | C) == (Y | C) --> (X ^ Y) & ~C == 0
6123 Value *X, *Y;
6124 Constant *C;
6125 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_Constant(C)))) &&
6126 match(Op1, m_OneUse(m_Or(m_Value(Y), m_Specific(C))))) {
6127 Value *Xor = Builder.CreateXor(X, Y);
6128 Value *And = Builder.CreateAnd(Xor, ConstantExpr::getNot(C));
6129 return new ICmpInst(Pred, And, Constant::getNullValue(And->getType()));
6130 }
6131 }
6132
6133 if (match(Op1, m_ZExt(m_Value(A))) &&
6134 (Op0->hasOneUse() || Op1->hasOneUse())) {
6135 // (B & (Pow2C-1)) == zext A --> A == trunc B
6136 // (B & (Pow2C-1)) != zext A --> A != trunc B
6137 const APInt *MaskC;
6138 if (match(Op0, m_And(m_Value(B), m_LowBitMask(MaskC))) &&
6139 MaskC->countr_one() == A->getType()->getScalarSizeInBits())
6140 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
6141 }
6142
6143 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
6144 // For lshr and ashr pairs.
6145 const APInt *AP1, *AP2;
6146 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6147 match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowPoison(AP2))))) ||
6148 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6149 match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowPoison(AP2)))))) {
6150 if (*AP1 != *AP2)
6151 return nullptr;
6152 unsigned TypeBits = AP1->getBitWidth();
6153 unsigned ShAmt = AP1->getLimitedValue(TypeBits);
6154 if (ShAmt < TypeBits && ShAmt != 0) {
6155 ICmpInst::Predicate NewPred =
6156 Pred == ICmpInst::ICMP_NE ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
6157 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6158 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
6159 return new ICmpInst(NewPred, Xor, ConstantInt::get(A->getType(), CmpVal));
6160 }
6161 }
6162
6163 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
6164 ConstantInt *Cst1;
6165 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
6166 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
6167 unsigned TypeBits = Cst1->getBitWidth();
6168 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
6169 if (ShAmt < TypeBits && ShAmt != 0) {
6170 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6171 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
6172 Value *And =
6173 Builder.CreateAnd(Xor, Builder.getInt(AndVal), I.getName() + ".mask");
6174 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
6175 }
6176 }
6177
6178 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
6179 // "icmp (and X, mask), cst"
6180 uint64_t ShAmt = 0;
6181 if (Op0->hasOneUse() &&
6182 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
6183 match(Op1, m_ConstantInt(Cst1)) &&
6184 // Only do this when A has multiple uses. This is most important to do
6185 // when it exposes other optimizations.
6186 !A->hasOneUse()) {
6187 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
6188
6189 if (ShAmt < ASize) {
6190 APInt MaskV =
6191 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
6192 MaskV <<= ShAmt;
6193
6194 APInt CmpV = Cst1->getValue().zext(ASize);
6195 CmpV <<= ShAmt;
6196
6197 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
6198 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
6199 }
6200 }
6201
6202 if (Instruction *ICmp = foldICmpWithTrunc(I))
6203 return ICmp;
6204
6205 // Match icmp eq (trunc (lshr A, BW)), (ashr (trunc A), BW-1), which checks
6206 // the top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s
6207 // INT_MAX", which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a
6208 // few steps of instcombine.
6209 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6210 if (match(Op0, m_AShr(m_Trunc(m_Value(A)), m_SpecificInt(BitWidth - 1))) &&
6211 match(Op1, m_Trunc(m_LShr(m_Specific(A), m_SpecificInt(BitWidth)))) &&
6212 A->getType()->getScalarSizeInBits() == BitWidth * 2 &&
6213 (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse())) {
6214 APInt C = APInt::getOneBitSet(BitWidth * 2, BitWidth - 1);
6215 Value *Add = Builder.CreateAdd(A, ConstantInt::get(A->getType(), C));
6216 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT
6217 : ICmpInst::ICMP_UGE,
6218 Add, ConstantInt::get(A->getType(), C.shl(1)));
6219 }
6220
6221 // Canonicalize:
6222 // Assume B_Pow2 != 0
6223 // 1. A & B_Pow2 != B_Pow2 -> A & B_Pow2 == 0
6224 // 2. A & B_Pow2 == B_Pow2 -> A & B_Pow2 != 0
6225 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())) &&
6226 isKnownToBeAPowerOfTwo(Op1, /* OrZero */ false, &I))
6227 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
6228 ConstantInt::getNullValue(Op0->getType()));
6229
6230 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())) &&
6231 isKnownToBeAPowerOfTwo(Op0, /* OrZero */ false, &I))
6232 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op1,
6233 ConstantInt::getNullValue(Op1->getType()));
6234
6235 // Canonicalize:
6236 // icmp eq/ne X, OneUse(rotate-right(X))
6237 // -> icmp eq/ne X, rotate-left(X)
6238 // We generally try to convert rotate-right -> rotate-left, this just
6239 // canonicalizes another case.
6240 if (match(&I, m_c_ICmp(m_Value(A),
6241 m_OneUse(m_Intrinsic<Intrinsic::fshr>(
6242 m_Deferred(A), m_Deferred(A), m_Value(B))))))
6243 return new ICmpInst(
6244 Pred, A,
6245 Builder.CreateIntrinsic(Op0->getType(), Intrinsic::fshl, {A, A, B}));
6246
6247 // Canonicalize:
6248 // icmp eq/ne OneUse(A ^ Cst), B --> icmp eq/ne (A ^ B), Cst
6249 Constant *Cst;
6250 if (match(&I, m_c_ICmp(m_OneUse(m_Xor(m_Value(A), m_ImmConstant(Cst))),
6251 m_CombineAnd(m_Value(B), m_Unless(m_ImmConstant())))))
6252 return new ICmpInst(Pred, Builder.CreateXor(A, B), Cst);
6253
6254 {
6255 // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6256 auto m_Matcher =
6257 m_CombineOr(m_CombineOr(m_c_Add(m_Value(B), m_Deferred(A)),
6258 m_c_Xor(m_Value(B), m_Deferred(A))),
6259 m_Sub(m_Value(B), m_Deferred(A)));
6260 std::optional<bool> IsZero = std::nullopt;
6261 if (match(&I, m_c_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)),
6262 m_Deferred(A))))
6263 IsZero = false;
6264 // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6265 else if (match(&I,
6266 m_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)), m_Zero())))
6267 IsZero = true;
6268
6269 if (IsZero && isKnownToBeAPowerOfTwo(A, /* OrZero */ true, &I))
6270 // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6271 // -> (icmp eq/ne (and X, P2), 0)
6272 // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6273 // -> (icmp eq/ne (and X, P2), P2)
6274 return new ICmpInst(Pred, Builder.CreateAnd(B, A),
6275 *IsZero ? A
6276 : ConstantInt::getNullValue(A->getType()));
6277 }
6278
6279 if (auto *Res = foldICmpEqualityWithOffset(
6280 I, Builder, getSimplifyQuery().getWithInstruction(&I)))
6281 return Res;
6282
6283 return nullptr;
6284}
6285
6286 Instruction *InstCombinerImpl::foldICmpWithTrunc(ICmpInst &ICmp) {
6287 ICmpInst::Predicate Pred = ICmp.getPredicate();
6288 Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1);
6289
6290 // Try to canonicalize trunc + compare-to-constant into a mask + cmp.
6291 // The trunc masks high bits while the compare may effectively mask low bits.
6292 Value *X;
6293 const APInt *C;
6294 if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C)))
6295 return nullptr;
6296
6297 // This matches patterns corresponding to tests of the signbit as well as:
6298 // (trunc X) pred C2 --> (X & Mask) == C
6299 if (auto Res = decomposeBitTestICmp(Op0, Op1, Pred, /*LookThroughTrunc=*/true,
6300 /*AllowNonZeroC=*/true)) {
6301 Value *And = Builder.CreateAnd(Res->X, Res->Mask);
6302 Constant *C = ConstantInt::get(Res->X->getType(), Res->C);
6303 return new ICmpInst(Res->Pred, And, C);
6304 }
6305
6306 unsigned SrcBits = X->getType()->getScalarSizeInBits();
6307 if (auto *II = dyn_cast<IntrinsicInst>(X)) {
6308 if (II->getIntrinsicID() == Intrinsic::cttz ||
6309 II->getIntrinsicID() == Intrinsic::ctlz) {
6310 unsigned MaxRet = SrcBits;
6311 // If the "is_zero_poison" argument is set, then we know at least
6312 // one bit is set in the input, so the result is always at least one
6313 // less than the full bitwidth of that input.
6314 if (match(II->getArgOperand(1), m_One()))
6315 MaxRet--;
6316
6317 // Make sure the destination is wide enough to hold the largest output of
6318 // the intrinsic.
6319 if (llvm::Log2_32(MaxRet) + 1 <= Op0->getType()->getScalarSizeInBits())
6320 if (Instruction *I =
6321 foldICmpIntrinsicWithConstant(ICmp, II, C->zext(SrcBits)))
6322 return I;
6323 }
6324 }
6325
6326 return nullptr;
6327}
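
// Illustrative sketch, not used by the pass (the helper name is ours): for the
// sign-bit case handled above, "(trunc i32 X to i8) s< 0" is the same as the
// mask test "(X & 0x80) != 0". Assumes only APInt and assert(), which this
// file already pulls in.
static bool truncSignBitExample(const APInt &X) {
  assert(X.getBitWidth() == 32);
  bool TruncIsNegative = X.trunc(8).isNegative();
  bool Bit7IsSet = !(X & APInt(32, 0x80)).isZero();
  return TruncIsNegative == Bit7IsSet; // holds for every X
}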
6328
6329 Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
6330 assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
6331 auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
6332 Value *X;
6333 if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
6334 return nullptr;
6335
6336 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6337 bool IsSignedCmp = ICmp.isSigned();
6338
6339 // icmp Pred (ext X), (ext Y)
6340 Value *Y;
6341 if (match(ICmp.getOperand(1), m_ZExtOrSExt(m_Value(Y)))) {
6342 bool IsZext0 = isa<ZExtInst>(ICmp.getOperand(0));
6343 bool IsZext1 = isa<ZExtInst>(ICmp.getOperand(1));
6344
6345 if (IsZext0 != IsZext1) {
6346 // If X and Y are both i1:
6347 // (icmp eq/ne (zext X) (sext Y))
6348 // eq -> (icmp eq (or X, Y), 0)
6349 // ne -> (icmp ne (or X, Y), 0)
6350 if (ICmp.isEquality() && X->getType()->isIntOrIntVectorTy(1) &&
6351 Y->getType()->isIntOrIntVectorTy(1))
6352 return new ICmpInst(ICmp.getPredicate(), Builder.CreateOr(X, Y),
6353 Constant::getNullValue(X->getType()));
6354
6355 // If we have mismatched casts and zext has the nneg flag, we can
6356 // treat the "zext nneg" as "sext". Otherwise, we cannot fold and quit.
6357
6358 auto *NonNegInst0 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(0));
6359 auto *NonNegInst1 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(1));
6360
6361 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6362 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6363
6364 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6365 IsSignedExt = true;
6366 else
6367 return nullptr;
6368 }
6369
6370 // Not an extension from the same type?
6371 Type *XTy = X->getType(), *YTy = Y->getType();
6372 if (XTy != YTy) {
6373 // One of the casts must have one use because we are creating a new cast.
6374 if (!ICmp.getOperand(0)->hasOneUse() && !ICmp.getOperand(1)->hasOneUse())
6375 return nullptr;
6376 // Extend the narrower operand to the type of the wider operand.
6377 CastInst::CastOps CastOpcode =
6378 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6379 if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
6380 X = Builder.CreateCast(CastOpcode, X, YTy);
6381 else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
6382 Y = Builder.CreateCast(CastOpcode, Y, XTy);
6383 else
6384 return nullptr;
6385 }
6386
6387 // (zext X) == (zext Y) --> X == Y
6388 // (sext X) == (sext Y) --> X == Y
6389 if (ICmp.isEquality())
6390 return new ICmpInst(ICmp.getPredicate(), X, Y);
6391
6392 // A signed comparison of sign extended values simplifies into a
6393 // signed comparison.
6394 if (IsSignedCmp && IsSignedExt)
6395 return new ICmpInst(ICmp.getPredicate(), X, Y);
6396
6397 // The other three cases all fold into an unsigned comparison.
6398 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
6399 }
6400
6401 // Below here, we are only folding a compare with constant.
6402 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
6403 if (!C)
6404 return nullptr;
6405
6406 // If a lossless truncate is possible...
6407 Type *SrcTy = CastOp0->getSrcTy();
6408 Constant *Res = getLosslessInvCast(C, SrcTy, CastOp0->getOpcode(), DL);
6409 if (Res) {
6410 if (ICmp.isEquality())
6411 return new ICmpInst(ICmp.getPredicate(), X, Res);
6412
6413 // A signed comparison of sign extended values simplifies into a
6414 // signed comparison.
6415 if (IsSignedExt && IsSignedCmp)
6416 return new ICmpInst(ICmp.getPredicate(), X, Res);
6417
6418 // The other three cases all fold into an unsigned comparison.
6419 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res);
6420 }
6421
6422 // The re-extended constant changed, partly changed (in the case of a vector),
6423 // or could not be determined to be equal (in the case of a constant
6424 // expression), so the constant cannot be represented in the shorter type.
6425 // All the cases that fold to true or false will have already been handled
6426 // by simplifyICmpInst, so only deal with the tricky case.
6427 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
6428 return nullptr;
6429
6430 // Is source op positive?
6431 // icmp ult (sext X), C --> icmp sgt X, -1
6432 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
6433 return new ICmpInst(ICmpInst::ICMP_SGT, X,
6434 Constant::getAllOnesValue(X->getType()));
6435 // Is source op negative?
6436 // icmp ugt (sext X), C --> icmp slt X, 0
6437 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
6438 return new ICmpInst(ICmpInst::ICMP_SLT, X, Constant::getNullValue(X->getType()));
6439}
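
// Small check (ours, illustrative only) of the claim above that the remaining
// mixed cases fold to an unsigned compare: zero extension preserves unsigned
// order, so the narrow and widened compares always agree.
static bool zextPreservesUnsignedOrder(const APInt &X, const APInt &Y) {
  assert(X.getBitWidth() == Y.getBitWidth());
  unsigned Wide = X.getBitWidth() * 2;
  return X.ult(Y) == X.zext(Wide).ult(Y.zext(Wide)); // always true
}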
6440
6441/// Handle icmp (cast x), (cast or constant).
6442 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
6443 // If any operand of ICmp is an inttoptr roundtrip cast, remove it, since
6444 // icmp compares only the pointer's value.
6445 // icmp (inttoptr (ptrtoint p1)), p2 --> icmp p1, p2.
6446 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(0));
6447 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(1));
6448 if (SimplifiedOp0 || SimplifiedOp1)
6449 return new ICmpInst(ICmp.getPredicate(),
6450 SimplifiedOp0 ? SimplifiedOp0 : ICmp.getOperand(0),
6451 SimplifiedOp1 ? SimplifiedOp1 : ICmp.getOperand(1));
6452
6453 auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
6454 if (!CastOp0)
6455 return nullptr;
6456 if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
6457 return nullptr;
6458
6459 Value *Op0Src = CastOp0->getOperand(0);
6460 Type *SrcTy = CastOp0->getSrcTy();
6461 Type *DestTy = CastOp0->getDestTy();
6462
6463 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
6464 // integer type is the same size as the pointer type.
6465 auto CompatibleSizes = [&](Type *PtrTy, Type *IntTy) {
6466 if (isa<VectorType>(PtrTy)) {
6467 PtrTy = cast<VectorType>(PtrTy)->getElementType();
6468 IntTy = cast<VectorType>(IntTy)->getElementType();
6469 }
6470 return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
6471 };
6472 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6473 CompatibleSizes(SrcTy, DestTy)) {
6474 Value *NewOp1 = nullptr;
6475 if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
6476 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6477 if (PtrSrc->getType() == Op0Src->getType())
6478 NewOp1 = PtrToIntOp1->getOperand(0);
6479 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6480 NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
6481 }
6482
6483 if (NewOp1)
6484 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6485 }
6486
6487 // Do the same in the other direction for icmp (inttoptr x), (inttoptr/c).
6488 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6489 CompatibleSizes(DestTy, SrcTy)) {
6490 Value *NewOp1 = nullptr;
6491 if (auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.getOperand(1))) {
6492 Value *IntSrc = IntToPtrOp1->getOperand(0);
6493 if (IntSrc->getType() == Op0Src->getType())
6494 NewOp1 = IntToPtrOp1->getOperand(0);
6495 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6496 NewOp1 = ConstantFoldConstant(ConstantExpr::getPtrToInt(RHSC, SrcTy), DL);
6497 }
6498
6499 if (NewOp1)
6500 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6501 }
6502
6503 if (Instruction *R = foldICmpWithTrunc(ICmp))
6504 return R;
6505
6506 return foldICmpWithZextOrSext(ICmp);
6507}
6508
6509 bool InstCombinerImpl::isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS,
6510 bool IsSigned) {
6511 switch (BinaryOp) {
6512 default:
6513 llvm_unreachable("Unsupported binary op");
6514 case Instruction::Add:
6515 case Instruction::Sub:
6516 return match(RHS, m_Zero());
6517 case Instruction::Mul:
6518 return !(RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
6519 match(RHS, m_One());
6520 }
6521}
6522
6523 OverflowResult
6524 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
6525 bool IsSigned, Value *LHS, Value *RHS,
6526 Instruction *CxtI) const {
6527 switch (BinaryOp) {
6528 default:
6529 llvm_unreachable("Unsupported binary op");
6530 case Instruction::Add:
6531 if (IsSigned)
6532 return computeOverflowForSignedAdd(LHS, RHS, CxtI);
6533 else
6534 return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
6535 case Instruction::Sub:
6536 if (IsSigned)
6537 return computeOverflowForSignedSub(LHS, RHS, CxtI);
6538 else
6539 return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
6540 case Instruction::Mul:
6541 if (IsSigned)
6542 return computeOverflowForSignedMul(LHS, RHS, CxtI);
6543 else
6544 return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
6545 }
6546}
6547
6548bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
6549 bool IsSigned, Value *LHS,
6550 Value *RHS, Instruction &OrigI,
6551 Value *&Result,
6552 Constant *&Overflow) {
6553 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
6554 std::swap(LHS, RHS);
6555
6556 // If the overflow check was an add followed by a compare, the insertion point
6557 // may be pointing to the compare. We want to insert the new instructions
6558 // before the add in case there are uses of the add between the add and the
6559 // compare.
6560 Builder.SetInsertPoint(&OrigI);
6561
6562 Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
6563 if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
6564 OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
6565
6566 if (isNeutralValue(BinaryOp, RHS, IsSigned)) {
6567 Result = LHS;
6568 Overflow = ConstantInt::getFalse(OverflowTy);
6569 return true;
6570 }
6571
6572 switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
6573 case OverflowResult::MayOverflow:
6574 return false;
6575 case OverflowResult::AlwaysOverflowsLow:
6576 case OverflowResult::AlwaysOverflowsHigh:
6577 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6578 Result->takeName(&OrigI);
6579 Overflow = ConstantInt::getTrue(OverflowTy);
6580 return true;
6581 case OverflowResult::NeverOverflows:
6582 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6583 Result->takeName(&OrigI);
6584 Overflow = ConstantInt::getFalse(OverflowTy);
6585 if (auto *Inst = dyn_cast<Instruction>(Result)) {
6586 if (IsSigned)
6587 Inst->setHasNoSignedWrap();
6588 else
6589 Inst->setHasNoUnsignedWrap();
6590 }
6591 return true;
6592 }
6593
6594 llvm_unreachable("Unexpected overflow result");
6595}
6596
6597/// Recognize and process idiom involving test for multiplication
6598/// overflow.
6599///
6600/// The caller has matched a pattern of the form:
6601 /// I = cmp u (mul(zext A, zext B)), V
6602/// The function checks if this is a test for overflow and if so replaces
6603/// multiplication with call to 'mul.with.overflow' intrinsic.
6604///
6605/// \param I Compare instruction.
6606 /// \param MulVal Result of 'mul' instruction. It is one of the arguments of
6607/// the compare instruction. Must be of integer type.
6608/// \param OtherVal The other argument of compare instruction.
6609/// \returns Instruction which must replace the compare instruction, NULL if no
6610 /// replacement required.
6611 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
6612 const APInt *OtherVal,
6613 InstCombinerImpl &IC) {
6614 // Don't bother doing this transformation for pointers, and don't do it for
6615 // vectors.
6616 if (!isa<IntegerType>(MulVal->getType()))
6617 return nullptr;
6618
6619 auto *MulInstr = dyn_cast<Instruction>(MulVal);
6620 if (!MulInstr)
6621 return nullptr;
6622 assert(MulInstr->getOpcode() == Instruction::Mul);
6623
6624 auto *LHS = cast<ZExtInst>(MulInstr->getOperand(0)),
6625 *RHS = cast<ZExtInst>(MulInstr->getOperand(1));
6626 assert(LHS->getOpcode() == Instruction::ZExt);
6627 assert(RHS->getOpcode() == Instruction::ZExt);
6628 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
6629
6630 // Calculate type and width of the result produced by mul.with.overflow.
6631 Type *TyA = A->getType(), *TyB = B->getType();
6632 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
6633 WidthB = TyB->getPrimitiveSizeInBits();
6634 unsigned MulWidth;
6635 Type *MulType;
6636 if (WidthB > WidthA) {
6637 MulWidth = WidthB;
6638 MulType = TyB;
6639 } else {
6640 MulWidth = WidthA;
6641 MulType = TyA;
6642 }
6643
6644 // In order to replace the original mul with a narrower mul.with.overflow,
6645 // all uses must ignore upper bits of the product. The number of used low
6646 // bits must not be greater than the width of mul.with.overflow.
6647 if (MulVal->hasNUsesOrMore(2))
6648 for (User *U : MulVal->users()) {
6649 if (U == &I)
6650 continue;
6651 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6652 // Check if truncation ignores bits above MulWidth.
6653 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
6654 if (TruncWidth > MulWidth)
6655 return nullptr;
6656 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6657 // Check if AND ignores bits above MulWidth.
6658 if (BO->getOpcode() != Instruction::And)
6659 return nullptr;
6660 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6661 const APInt &CVal = CI->getValue();
6662 if (CVal.getBitWidth() - CVal.countl_zero() > MulWidth)
6663 return nullptr;
6664 } else {
6665 // In this case we could have the operand of the binary operation
6666 // being defined in another block, and performing the replacement
6667 // could break the dominance relation.
6668 return nullptr;
6669 }
6670 } else {
6671 // Other uses prohibit this transformation.
6672 return nullptr;
6673 }
6674 }
6675
6676 // Recognize patterns
6677 switch (I.getPredicate()) {
6678 case ICmpInst::ICMP_UGT: {
6679 // Recognize pattern:
6680 // mulval = mul(zext A, zext B)
6681 // cmp ugt mulval, max
6682 APInt MaxVal = APInt::getMaxValue(MulWidth);
6683 MaxVal = MaxVal.zext(OtherVal->getBitWidth());
6684 if (MaxVal.eq(*OtherVal))
6685 break; // Recognized
6686 return nullptr;
6687 }
6688
6689 case ICmpInst::ICMP_ULT: {
6690 // Recognize pattern:
6691 // mulval = mul(zext A, zext B)
6692 // cmp ult mulval, max + 1
6693 APInt MaxVal = APInt::getOneBitSet(OtherVal->getBitWidth(), MulWidth);
6694 if (MaxVal.eq(*OtherVal))
6695 break; // Recognized
6696 return nullptr;
6697 }
6698
6699 default:
6700 return nullptr;
6701 }
6702
6703 InstCombiner::BuilderTy &Builder = IC.Builder;
6704 Builder.SetInsertPoint(MulInstr);
6705
6706 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
6707 Value *MulA = A, *MulB = B;
6708 if (WidthA < MulWidth)
6709 MulA = Builder.CreateZExt(A, MulType);
6710 if (WidthB < MulWidth)
6711 MulB = Builder.CreateZExt(B, MulType);
6712 CallInst *Call =
6713 Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
6714 {MulA, MulB}, /*FMFSource=*/nullptr, "umul");
6715 IC.addToWorklist(MulInstr);
6716
6717 // If there are uses of mul result other than the comparison, we know that
6718 // they are truncation or binary AND. Change them to use the result of
6719 // mul.with.overflow and properly adjust the mask/size.
6720 if (MulVal->hasNUsesOrMore(2)) {
6721 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
6722 for (User *U : make_early_inc_range(MulVal->users())) {
6723 if (U == &I)
6724 continue;
6725 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6726 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6727 IC.replaceInstUsesWith(*TI, Mul);
6728 else
6729 TI->setOperand(0, Mul);
6730 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6731 assert(BO->getOpcode() == Instruction::And);
6732 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
6733 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
6734 APInt ShortMask = CI->getValue().trunc(MulWidth);
6735 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
6736 Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
6737 IC.replaceInstUsesWith(*BO, Zext);
6738 } else {
6739 llvm_unreachable("Unexpected Binary operation");
6740 }
6741 IC.addToWorklist(cast<Instruction>(U));
6742 }
6743 }
6744
6745 // The original icmp gets replaced with the overflow value, maybe inverted
6746 // depending on predicate.
6747 if (I.getPredicate() == ICmpInst::ICMP_ULT) {
6748 Value *Res = Builder.CreateExtractValue(Call, 1);
6749 return BinaryOperator::CreateNot(Res);
6750 }
6751
6752 return ExtractValueInst::Create(Call, 1);
6753}
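
// Sketch (ours) of the threshold recognized in the ICMP_UGT case above, shown
// at width 8 for brevity: the wide product of two zexts exceeds the narrow
// maximum value exactly when the narrow umul overflows.
static void umulIdiomExample() {
  APInt A(8, 200), B(8, 3);
  bool Overflow;
  (void)A.umul_ov(B, Overflow);
  APInt WideProduct = A.zext(16) * B.zext(16); // 600
  assert(WideProduct.ugt(APInt::getMaxValue(8).zext(16)) == Overflow);
}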
6754
6755/// When performing a comparison against a constant, it is possible that not all
6756/// the bits in the LHS are demanded. This helper method computes the mask that
6757/// IS demanded.
6758 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
6759 const APInt *RHS;
6760 if (!match(I.getOperand(1), m_APInt(RHS)))
6761 return APInt::getAllOnes(BitWidth);
6762
6763 // If this is a normal comparison, it demands all bits. If it is a sign bit
6764 // comparison, it only demands the sign bit.
6765 bool UnusedBit;
6766 if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
6767 return APInt::getSignMask(BitWidth);
6768
6769 switch (I.getPredicate()) {
6770 // For a UGT comparison, we don't care about any bits that
6771 // correspond to the trailing ones of the comparand. The value of these
6772 // bits doesn't impact the outcome of the comparison, because any value
6773 // greater than the RHS must differ in a bit higher than these due to carry.
6774 case ICmpInst::ICMP_UGT:
6775 return APInt::getBitsSetFrom(BitWidth, RHS->countr_one());
6776
6777 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
6778 // Any value less than the RHS must differ in a higher bit because of carries.
6779 case ICmpInst::ICMP_ULT:
6780 return APInt::getBitsSetFrom(BitWidth, RHS->countr_zero());
6781
6782 default:
6783 return APInt::getAllOnes(BitWidth);
6784 }
6785}
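
// Exhaustive i8 check (ours, illustrative only) of the ICMP_UGT rule above:
// values that agree on the bits above RHS's trailing ones always compare the
// same way against RHS, so the low (trailing-one) bits are not demanded.
static void demandedBitsUGTExample() {
  APInt RHS(8, 7); // 0b0111 -> the low three bits are not demanded
  APInt Demanded = APInt::getBitsSetFrom(8, RHS.countr_one());
  for (unsigned X = 0; X != 256; ++X)
    for (unsigned Y = 0; Y != 256; ++Y)
      if ((APInt(8, X) & Demanded) == (APInt(8, Y) & Demanded))
        assert(APInt(8, X).ugt(RHS) == APInt(8, Y).ugt(RHS));
}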
6786
6787/// Check that one use is in the same block as the definition and all
6788/// other uses are in blocks dominated by a given block.
6789///
6790/// \param DI Definition
6791/// \param UI Use
6792/// \param DB Block that must dominate all uses of \p DI outside
6793/// the parent block
6794/// \return true when \p UI is the only use of \p DI in the parent block
6795/// and all other uses of \p DI are in blocks dominated by \p DB.
6796///
6797 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
6798 const Instruction *UI,
6799 const BasicBlock *DB) const {
6800 assert(DI && UI && "Instruction not defined\n");
6801 // Ignore incomplete definitions.
6802 if (!DI->getParent())
6803 return false;
6804 // DI and UI must be in the same block.
6805 if (DI->getParent() != UI->getParent())
6806 return false;
6807 // Protect from self-referencing blocks.
6808 if (DI->getParent() == DB)
6809 return false;
6810 for (const User *U : DI->users()) {
6811 auto *Usr = cast<Instruction>(U);
6812 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
6813 return false;
6814 }
6815 return true;
6816}
6817
6818/// Return true when the instruction sequence within a block is select-cmp-br.
6819 static bool isChainSelectCmpBranch(const SelectInst *SI) {
6820 const BasicBlock *BB = SI->getParent();
6821 if (!BB)
6822 return false;
6823 const BranchInst *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
6824 if (!BI || BI->getNumSuccessors() != 2)
6825 return false;
6826 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
6827 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
6828 return false;
6829 return true;
6830}
6831
6832/// True when a select result is replaced by one of its operands
6833/// in select-icmp sequence. This will eventually result in the elimination
6834/// of the select.
6835///
6836/// \param SI Select instruction
6837/// \param Icmp Compare instruction
6838/// \param SIOpd Operand that replaces the select
6839///
6840/// Notes:
6841/// - The replacement is global and requires dominator information
6842/// - The caller is responsible for the actual replacement
6843///
6844/// Example:
6845///
6846/// entry:
6847/// %4 = select i1 %3, %C* %0, %C* null
6848/// %5 = icmp eq %C* %4, null
6849/// br i1 %5, label %9, label %7
6850/// ...
6851/// ; <label>:7 ; preds = %entry
6852/// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
6853/// ...
6854///
6855/// can be transformed to
6856///
6857/// %5 = icmp eq %C* %0, null
6858/// %6 = select i1 %3, i1 %5, i1 true
6859/// br i1 %6, label %9, label %7
6860/// ...
6861/// ; <label>:7 ; preds = %entry
6862/// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
6863///
6864 /// The same applies when the first operand of the select is a constant and/or
6865 /// the compare is for not-equal rather than equal.
6866///
6867/// NOTE: The function is only called when the select and compare constants
6868 /// are equal; the optimization can work only for EQ predicates. This is not a
6869/// major restriction since a NE compare should be 'normalized' to an equal
6870/// compare, which usually happens in the combiner and test case
6871/// select-cmp-br.ll checks for it.
6872 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
6873 const ICmpInst *Icmp,
6874 const unsigned SIOpd) {
6875 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
6876 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
6877 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
6878 // The check for the single predecessor is not the best that can be
6879 // done. But it protects efficiently against cases like when SI's
6880 // home block has two successors, Succ and Succ1, and Succ1 is a predecessor
6881 // of Succ. Then SI can't be replaced by SIOpd because the use that gets
6882 // replaced can be reached on either path. So the uniqueness check
6883 // guarantees that the path all uses of SI (outside SI's parent) are on
6884 // is disjoint from all other paths out of SI. But that information
6885 // is more expensive to compute, and the trade-off here is in favor
6886 // of compile-time. It should also be noticed that we check for a single
6887 // predecessor and not only uniqueness. This is to handle the situation when
6888 // Succ and Succ1 point to the same basic block.
6889 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
6890 NumSel++;
6891 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
6892 return true;
6893 }
6894 }
6895 return false;
6896}
6897
6898/// Try to fold the comparison based on range information we can get by checking
6899/// whether bits are known to be zero or one in the inputs.
6900 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
6901 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6902 Type *Ty = Op0->getType();
6903 ICmpInst::Predicate Pred = I.getPredicate();
6904
6905 // Get scalar or pointer size.
6906 unsigned BitWidth = Ty->isIntOrIntVectorTy()
6907 ? Ty->getScalarSizeInBits()
6908 : DL.getPointerTypeSizeInBits(Ty->getScalarType());
6909
6910 if (!BitWidth)
6911 return nullptr;
6912
6913 KnownBits Op0Known(BitWidth);
6914 KnownBits Op1Known(BitWidth);
6915
6916 {
6917 // Don't use dominating conditions when folding icmp using known bits. This
6918 // may convert signed into unsigned predicates in ways that other passes
6919 // (especially IndVarSimplify) may not be able to reliably undo.
6920 SimplifyQuery Q = SQ.getWithoutDomCondCache().getWithInstruction(&I);
6921 if (SimplifyDemandedBits(&I, 0, getDemandedBitsLHSMask(I, BitWidth),
6922 Op0Known, Q))
6923 return &I;
6924
6925 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, Q))
6926 return &I;
6927 }
6928
6929 if (!isa<Constant>(Op0) && Op0Known.isConstant())
6930 return new ICmpInst(
6931 Pred, ConstantExpr::getIntegerValue(Ty, Op0Known.getConstant()), Op1);
6932 if (!isa<Constant>(Op1) && Op1Known.isConstant())
6933 return new ICmpInst(
6934 Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Known.getConstant()));
6935
6936 if (std::optional<bool> Res = ICmpInst::compare(Op0Known, Op1Known, Pred))
6937 return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *Res));
6938
6939 // Given the known and unknown bits, compute a range that the LHS could be
6940 // in. Compute the Min, Max and RHS values based on the known bits. For the
6941 // EQ and NE we use unsigned values.
6942 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6943 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6944 if (I.isSigned()) {
6945 Op0Min = Op0Known.getSignedMinValue();
6946 Op0Max = Op0Known.getSignedMaxValue();
6947 Op1Min = Op1Known.getSignedMinValue();
6948 Op1Max = Op1Known.getSignedMaxValue();
6949 } else {
6950 Op0Min = Op0Known.getMinValue();
6951 Op0Max = Op0Known.getMaxValue();
6952 Op1Min = Op1Known.getMinValue();
6953 Op1Max = Op1Known.getMaxValue();
6954 }
6955
6956 // Don't break up a clamp pattern -- (min(max X, Y), Z) -- by replacing a
6957 // min/max canonical compare with some other compare. That could lead to
6958 // conflict with select canonicalization and infinite looping.
6959 // FIXME: This constraint may go away if min/max intrinsics are canonical.
6960 auto isMinMaxCmp = [&](Instruction &Cmp) {
6961 if (!Cmp.hasOneUse())
6962 return false;
6963 Value *A, *B;
6964 SelectPatternFlavor SPF = matchSelectPattern(Cmp.user_back(), A, B).Flavor;
6965 if (SPF != SPF_SMIN && SPF != SPF_SMAX && SPF != SPF_UMIN && SPF != SPF_UMAX)
6966 return false;
6967 return match(Op0, m_MaxOrMin(m_Value(), m_Value())) ||
6968 match(Op1, m_MaxOrMin(m_Value(), m_Value()));
6969 };
6970 if (!isMinMaxCmp(I)) {
6971 switch (Pred) {
6972 default:
6973 break;
6974 case ICmpInst::ICMP_ULT: {
6975 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6976 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6977 const APInt *CmpC;
6978 if (match(Op1, m_APInt(CmpC))) {
6979 // A <u C -> A == C-1 if min(A)+1 == C
6980 if (*CmpC == Op0Min + 1)
6981 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6982 ConstantInt::get(Op1->getType(), *CmpC - 1));
6983 // X <u C --> X == 0, if the number of zero bits in the bottom of X
6984 // exceeds the log2 of C.
6985 if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
6986 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6987 Constant::getNullValue(Op1->getType()));
6988 }
6989 break;
6990 }
6991 case ICmpInst::ICMP_UGT: {
6992 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6993 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6994 const APInt *CmpC;
6995 if (match(Op1, m_APInt(CmpC))) {
6996 // A >u C -> A == C+1 if max(A)-1 == C
6997 if (*CmpC == Op0Max - 1)
6998 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6999 ConstantInt::get(Op1->getType(), *CmpC + 1));
7000 // X >u C --> X != 0, if the number of zero bits in the bottom of X
7001 // exceeds the log2 of C.
7002 if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
7003 return new ICmpInst(ICmpInst::ICMP_NE, Op0,
7004 Constant::getNullValue(Op1->getType()));
7005 }
7006 break;
7007 }
7008 case ICmpInst::ICMP_SLT: {
7009 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
7010 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
7011 const APInt *CmpC;
7012 if (match(Op1, m_APInt(CmpC))) {
7013 if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
7014 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
7015 ConstantInt::get(Op1->getType(), *CmpC - 1));
7016 }
7017 break;
7018 }
7019 case ICmpInst::ICMP_SGT: {
7020 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
7021 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
7022 const APInt *CmpC;
7023 if (match(Op1, m_APInt(CmpC))) {
7024 if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
7025 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
7026 ConstantInt::get(Op1->getType(), *CmpC + 1));
7027 }
7028 break;
7029 }
7030 }
7031 }
7032
7033 // Based on the range information we know about the LHS, see if we can
7034 // simplify this comparison. For example, (x&4) < 8 is always true.
7035 switch (Pred) {
7036 default:
7037 break;
7038 case ICmpInst::ICMP_EQ:
7039 case ICmpInst::ICMP_NE: {
7040 // If all bits are known zero except for one, then we know at most one bit
7041 // is set. If the comparison is against zero, then this is a check to see if
7042 // *that* bit is set.
7043 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
7044 if (Op1Known.isZero()) {
7045 // If the LHS is an AND with the same constant, look through it.
7046 Value *LHS = nullptr;
7047 const APInt *LHSC;
7048 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
7049 *LHSC != Op0KnownZeroInverted)
7050 LHS = Op0;
7051
7052 Value *X;
7053 const APInt *C1;
7054 if (match(LHS, m_Shl(m_Power2(C1), m_Value(X)))) {
7055 Type *XTy = X->getType();
7056 unsigned Log2C1 = C1->countr_zero();
7057 APInt C2 = Op0KnownZeroInverted;
7058 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
7059 if (C2Pow2.isPowerOf2()) {
7060 // iff (C1 is pow2) && (((C2 & ~(C1-1)) + C1) is pow2):
7061 // ((C1 << X) & C2) == 0 -> X >= (Log2(C2+C1) - Log2(C1))
7062 // ((C1 << X) & C2) != 0 -> X < (Log2(C2+C1) - Log2(C1))
7063 unsigned Log2C2 = C2Pow2.countr_zero();
7064 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7065 auto NewPred =
7066 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
7067 return new ICmpInst(NewPred, X, CmpC);
7068 }
7069 }
7070 }
7071
7072 // Op0 eq C_Pow2 -> Op0 ne 0 if Op0 is known to be C_Pow2 or zero.
7073 if (Op1Known.isConstant() && Op1Known.getConstant().isPowerOf2() &&
7074 (Op0Known & Op1Known) == Op0Known)
7075 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
7076 ConstantInt::getNullValue(Op1->getType()));
7077 break;
7078 }
7079 case ICmpInst::ICMP_SGE:
7080 if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
7081 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7082 break;
7083 case ICmpInst::ICMP_SLE:
7084 if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
7085 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7086 break;
7087 case ICmpInst::ICMP_UGE:
7088 if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
7089 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7090 break;
7091 case ICmpInst::ICMP_ULE:
7092 if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
7093 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7094 break;
7095 }
7096
7097 // Turn a signed comparison into an unsigned one if both operands are known to
7098 // have the same sign. Set samesign if possible (except for equality
7099 // predicates).
7100 if ((I.isSigned() || (I.isUnsigned() && !I.hasSameSign())) &&
7101 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
7102 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) {
7103 I.setPredicate(I.getUnsignedPredicate());
7104 I.setSameSign();
7105 return &I;
7106 }
7107
7108 return nullptr;
7109}
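
// Worked instance (ours, illustrative only) of the "A <u C -> A == C-1" case
// above: if both low bits of A are known one, the unsigned minimum of A is 3,
// so "A u< 4" can only be satisfied by A == 3.
static void knownBitsMinExample() {
  KnownBits Known(8);
  Known.One = APInt(8, 0x3); // low two bits known to be one
  assert(Known.getMinValue() == 3);
}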
7110
7111/// If one operand of an icmp is effectively a bool (value range of {0,1}),
7112/// then try to reduce patterns based on that limit.
7113 Instruction *InstCombinerImpl::foldICmpUsingBoolRange(ICmpInst &I) {
7114 Value *X, *Y;
7115 CmpPredicate Pred;
7116
7117 // X must be 0 and bool must be true for "ULT":
7118 // X <u (zext i1 Y) --> (X == 0) & Y
7119 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_ZExt(m_Value(Y))))) &&
7120 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULT)
7121 return BinaryOperator::CreateAnd(Builder.CreateIsNull(X), Y);
7122
7123 // X must be 0 or bool must be true for "ULE":
7124 // X <=u (sext i1 Y) --> (X == 0) | Y
7125 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_SExt(m_Value(Y))))) &&
7126 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULE)
7127 return BinaryOperator::CreateOr(Builder.CreateIsNull(X), Y);
7128
7129 // icmp eq/ne X, (zext/sext (icmp eq/ne X, C))
7130 CmpPredicate Pred1, Pred2;
7131 const APInt *C;
7132 Instruction *ExtI;
7133 if (match(&I, m_c_ICmp(Pred1, m_Value(X),
7134 m_CombineAnd(m_Instruction(ExtI),
7135 m_ZExtOrSExt(m_ICmp(Pred2, m_Deferred(X),
7136 m_APInt(C)))))) &&
7137 ICmpInst::isEquality(Pred1) && ICmpInst::isEquality(Pred2)) {
7138 bool IsSExt = ExtI->getOpcode() == Instruction::SExt;
7139 bool HasOneUse = ExtI->hasOneUse() && ExtI->getOperand(0)->hasOneUse();
7140 auto CreateRangeCheck = [&] {
7141 Value *CmpV1 =
7142 Builder.CreateICmp(Pred1, X, Constant::getNullValue(X->getType()));
7143 Value *CmpV2 = Builder.CreateICmp(
7144 Pred1, X, ConstantInt::getSigned(X->getType(), IsSExt ? -1 : 1));
7145 return Builder.CreateBinOp(
7146 Pred1 == ICmpInst::ICMP_EQ ? Instruction::Or : Instruction::And,
7147 CmpV1, CmpV2);
7148 };
7149 if (C->isZero()) {
7150 if (Pred2 == ICmpInst::ICMP_EQ) {
7151 // icmp eq X, (zext/sext (icmp eq X, 0)) --> false
7152 // icmp ne X, (zext/sext (icmp eq X, 0)) --> true
7153 return replaceInstUsesWith(
7154 I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7155 } else if (!IsSExt || HasOneUse) {
7156 // icmp eq X, (zext (icmp ne X, 0)) --> X == 0 || X == 1
7157 // icmp ne X, (zext (icmp ne X, 0)) --> X != 0 && X != 1
7158 // icmp eq X, (sext (icmp ne X, 0)) --> X == 0 || X == -1
7159 // icmp ne X, (sext (icmp ne X, 0)) --> X != 0 && X != -1
7160 return CreateRangeCheck();
7161 }
7162 } else if (IsSExt ? C->isAllOnes() : C->isOne()) {
7163 if (Pred2 == ICmpInst::ICMP_NE) {
7164 // icmp eq X, (zext (icmp ne X, 1)) --> false
7165 // icmp ne X, (zext (icmp ne X, 1)) --> true
7166 // icmp eq X, (sext (icmp ne X, -1)) --> false
7167 // icmp ne X, (sext (icmp ne X, -1)) --> true
7168 return replaceInstUsesWith(
7169 I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7170 } else if (!IsSExt || HasOneUse) {
7171 // icmp eq X, (zext (icmp eq X, 1)) --> X == 0 || X == 1
7172 // icmp ne X, (zext (icmp eq X, 1)) --> X != 0 && X != 1
7173 // icmp eq X, (sext (icmp eq X, -1)) --> X == 0 || X == -1
7174 // icmp ne X, (sext (icmp eq X, -1)) --> X != 0 && X != -1
7175 return CreateRangeCheck();
7176 }
7177 } else {
7178 // when C != 0 && C != 1:
7179 // icmp eq X, (zext (icmp eq X, C)) --> icmp eq X, 0
7180 // icmp eq X, (zext (icmp ne X, C)) --> icmp eq X, 1
7181 // icmp ne X, (zext (icmp eq X, C)) --> icmp ne X, 0
7182 // icmp ne X, (zext (icmp ne X, C)) --> icmp ne X, 1
7183 // when C != 0 && C != -1:
7184 // icmp eq X, (sext (icmp eq X, C)) --> icmp eq X, 0
7185 // icmp eq X, (sext (icmp ne X, C)) --> icmp eq X, -1
7186 // icmp ne X, (sext (icmp eq X, C)) --> icmp ne X, 0
7187 // icmp ne X, (sext (icmp ne X, C)) --> icmp ne X, -1
7188 return ICmpInst::Create(
7189 Instruction::ICmp, Pred1, X,
7190 ConstantInt::getSigned(X->getType(), Pred2 == ICmpInst::ICMP_NE
7191 ? (IsSExt ? -1 : 1)
7192 : 0));
7193 }
7194 }
7195
7196 return nullptr;
7197}
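
// Exhaustive check (ours, illustrative only) of the first bool-range fold
// above, over i8 X and i1 Y: "X u< (zext Y)" holds exactly when X == 0 and
// Y is true.
static void boolRangeFoldExample() {
  for (unsigned V = 0; V != 256; ++V)
    for (bool Y : {false, true}) {
      APInt X(8, V), ZextY(8, Y ? 1 : 0);
      assert(X.ult(ZextY) == (X.isZero() && Y));
    }
}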
7198
7199/// If we have an icmp le or icmp ge instruction with a constant operand, turn
7200/// it into the appropriate icmp lt or icmp gt instruction. This transform
7201/// allows them to be folded in visitICmpInst.
7202 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
7203 ICmpInst::Predicate Pred = I.getPredicate();
7204 if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
7205 InstCombiner::isCanonicalPredicate(Pred))
7206 return nullptr;
7207
7208 Value *Op0 = I.getOperand(0);
7209 Value *Op1 = I.getOperand(1);
7210 auto *Op1C = dyn_cast<Constant>(Op1);
7211 if (!Op1C)
7212 return nullptr;
7213
7214 auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
7215 if (!FlippedStrictness)
7216 return nullptr;
7217
7218 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7219}
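
// Spot check (ours, illustrative only) of the strictness flip above for one
// constant: as long as C - 1 does not wrap (the helper refuses constants
// whose adjustment would wrap), "icmp sge X, C" and "icmp sgt X, C - 1" are
// interchangeable.
static void flippedStrictnessExample() {
  APInt C(32, 5);
  for (int V : {4, 5, 6}) {
    APInt X(32, V);
    assert(X.sge(C) == X.sgt(C - 1));
  }
}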
7220
7221/// If we have a comparison with a non-canonical predicate, if we can update
7222/// all the users, invert the predicate and adjust all the users.
7223 Instruction *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
7224 // Is the predicate already canonical?
7225 CmpInst::Predicate Pred = I.getPredicate();
7226 if (InstCombiner::isCanonicalPredicate(Pred))
7227 return nullptr;
7228
7229 // Can all users be adjusted to predicate inversion?
7230 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
7231 return nullptr;
7232
7233 // Ok, we can canonicalize comparison!
7234 // Let's first invert the comparison's predicate.
7235 I.setPredicate(CmpInst::getInversePredicate(Pred));
7236 I.setName(I.getName() + ".not");
7237
7238 // And, adapt users.
7239 freelyInvertAllUsersOf(&I);
7240
7241 return &I;
7242}
7243
7244/// Integer compare with boolean values can always be turned into bitwise ops.
7245 static Instruction *canonicalizeICmpBool(ICmpInst &I,
7246 InstCombiner::BuilderTy &Builder) {
7247 Value *A = I.getOperand(0), *B = I.getOperand(1);
7248 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
7249
7250 // A boolean compared to true/false can be simplified to Op0/true/false in
7251 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
7252 // Cases not handled by InstSimplify are always 'not' of Op0.
7253 if (match(B, m_Zero())) {
7254 switch (I.getPredicate()) {
7255 case CmpInst::ICMP_EQ: // A == 0 -> !A
7256 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
7257 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
7258 return BinaryOperator::CreateNot(A);
7259 default:
7260 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7261 }
7262 } else if (match(B, m_One())) {
7263 switch (I.getPredicate()) {
7264 case CmpInst::ICMP_NE: // A != 1 -> !A
7265 case CmpInst::ICMP_ULT: // A <u 1 -> !A
7266 case CmpInst::ICMP_SGT: // A >s -1 -> !A
7267 return BinaryOperator::CreateNot(A);
7268 default:
7269 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7270 }
7271 }
7272
7273 switch (I.getPredicate()) {
7274 default:
7275 llvm_unreachable("Invalid icmp instruction!");
7276 case ICmpInst::ICMP_EQ:
7277 // icmp eq i1 A, B -> ~(A ^ B)
7278 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
7279
7280 case ICmpInst::ICMP_NE:
7281 // icmp ne i1 A, B -> A ^ B
7282 return BinaryOperator::CreateXor(A, B);
7283
7284 case ICmpInst::ICMP_UGT:
7285 // icmp ugt -> icmp ult
7286 std::swap(A, B);
7287 [[fallthrough]];
7288 case ICmpInst::ICMP_ULT:
7289 // icmp ult i1 A, B -> ~A & B
7290 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
7291
7292 case ICmpInst::ICMP_SGT:
7293 // icmp sgt -> icmp slt
7294 std::swap(A, B);
7295 [[fallthrough]];
7296 case ICmpInst::ICMP_SLT:
7297 // icmp slt i1 A, B -> A & ~B
7298 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
7299
7300 case ICmpInst::ICMP_UGE:
7301 // icmp uge -> icmp ule
7302 std::swap(A, B);
7303 [[fallthrough]];
7304 case ICmpInst::ICMP_ULE:
7305 // icmp ule i1 A, B -> ~A | B
7306 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
7307
7308 case ICmpInst::ICMP_SGE:
7309 // icmp sge -> icmp sle
7310 std::swap(A, B);
7311 [[fallthrough]];
7312 case ICmpInst::ICMP_SLE:
7313 // icmp sle i1 A, B -> A | ~B
7314 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
7315 }
7316}
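
// Exhaustive i1 truth table (ours, illustrative only) for one row above:
// "icmp ult i1 A, B" (i.e. A == 0 and B == 1) is exactly "~A & B".
static void icmpBoolExample() {
  for (bool A : {false, true})
    for (bool B : {false, true})
      assert(((unsigned)A < (unsigned)B) == (!A && B));
}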
7317
7318// Transform pattern like:
7319// (1 << Y) u<= X or ~(-1 << Y) u< X or ((1 << Y)+(-1)) u< X
7320// (1 << Y) u> X or ~(-1 << Y) u>= X or ((1 << Y)+(-1)) u>= X
7321// Into:
7322// (X l>> Y) != 0
7323// (X l>> Y) == 0
7324 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
7325 InstCombiner::BuilderTy &Builder) {
7326 CmpPredicate Pred, NewPred;
7327 Value *X, *Y;
7328 if (match(&Cmp,
7329 m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
7330 switch (Pred) {
7331 case ICmpInst::ICMP_ULE:
7332 NewPred = ICmpInst::ICMP_NE;
7333 break;
7334 case ICmpInst::ICMP_UGT:
7335 NewPred = ICmpInst::ICMP_EQ;
7336 break;
7337 default:
7338 return nullptr;
7339 }
7340 } else if (match(&Cmp, m_c_ICmp(Pred,
7341 m_OneUse(m_CombineOr(
7342 m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
7343 m_Add(m_Shl(m_One(), m_Value(Y)),
7344 m_AllOnes()))),
7345 m_Value(X)))) {
7346 // The variant with 'add' is not canonical (the variant with 'not' is);
7347 // we only get it here because it has extra uses and can't be canonicalized.
7348
7349 switch (Pred) {
7350 case ICmpInst::ICMP_ULT:
7351 NewPred = ICmpInst::ICMP_NE;
7352 break;
7353 case ICmpInst::ICMP_UGE:
7354 NewPred = ICmpInst::ICMP_EQ;
7355 break;
7356 default:
7357 return nullptr;
7358 }
7359 } else
7360 return nullptr;
7361
7362 Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
7363 Constant *Zero = Constant::getNullValue(NewX->getType());
7364 return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
7365}
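
// Exhaustive i8 check (ours, illustrative only) of the transform above: for
// in-range shift amounts, "(1 << Y) u<= X" agrees with "(X u>> Y) != 0".
static void highBitMaskExample() {
  for (unsigned V = 0; V != 256; ++V)
    for (unsigned Y = 0; Y != 8; ++Y) {
      APInt X(8, V);
      assert(APInt::getOneBitSet(8, Y).ule(X) == !X.lshr(Y).isZero());
    }
}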
7366
7367 static Instruction *foldVectorCmp(CmpInst &Cmp,
7368 InstCombiner::BuilderTy &Builder) {
7369 const CmpInst::Predicate Pred = Cmp.getPredicate();
7370 Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
7371 Value *V1, *V2;
7372
7373 auto createCmpReverse = [&](CmpInst::Predicate Pred, Value *X, Value *Y) {
7374 Value *V = Builder.CreateCmp(Pred, X, Y, Cmp.getName());
7375 if (auto *I = dyn_cast<Instruction>(V))
7376 I->copyIRFlags(&Cmp);
7377 Module *M = Cmp.getModule();
7378 Function *F = Intrinsic::getOrInsertDeclaration(
7379 M, Intrinsic::vector_reverse, V->getType());
7380 return CallInst::Create(F, V);
7381 };
7382
7383 if (match(LHS, m_VecReverse(m_Value(V1)))) {
7384 // cmp Pred, rev(V1), rev(V2) --> rev(cmp Pred, V1, V2)
7385 if (match(RHS, m_VecReverse(m_Value(V2))) &&
7386 (LHS->hasOneUse() || RHS->hasOneUse()))
7387 return createCmpReverse(Pred, V1, V2);
7388
7389 // cmp Pred, rev(V1), RHSSplat --> rev(cmp Pred, V1, RHSSplat)
7390 if (LHS->hasOneUse() && isSplatValue(RHS))
7391 return createCmpReverse(Pred, V1, RHS);
7392 }
7393 // cmp Pred, LHSSplat, rev(V2) --> rev(cmp Pred, LHSSplat, V2)
7394 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
7395 return createCmpReverse(Pred, LHS, V2);
7396
7397 ArrayRef<int> M;
7398 if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
7399 return nullptr;
7400
7401 // If both arguments of the cmp are shuffles that use the same mask and
7402 // shuffle within a single vector, move the shuffle after the cmp:
7403 // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
7404 Type *V1Ty = V1->getType();
7405 if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
7406 V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
7407 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
7408 return new ShuffleVectorInst(NewCmp, M);
7409 }
7410
7411 // Try to canonicalize compare with splatted operand and splat constant.
7412 // TODO: We could generalize this for more than splats. See/use the code in
7413 // InstCombiner::foldVectorBinop().
7414 Constant *C;
7415 if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
7416 return nullptr;
7417
7418 // Length-changing splats are ok, so adjust the constants as needed:
7419 // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
7420 Constant *ScalarC = C->getSplatValue(/* AllowPoison */ true);
7421 int MaskSplatIndex;
7422 if (ScalarC && match(M, m_SplatOrPoisonMask(MaskSplatIndex))) {
7423 // We allow poison in matching, but this transform removes it for safety.
7424 // Demanded elements analysis should be able to recover some/all of that.
7425 C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
7426 ScalarC);
7427 SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
7428 Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
7429 return new ShuffleVectorInst(NewCmp, NewM);
7430 }
7431
7432 return nullptr;
7433}
7434
7435// extract(uadd.with.overflow(A, B), 0) ult A
7436// -> extract(uadd.with.overflow(A, B), 1)
7437 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
7438 CmpInst::Predicate Pred = I.getPredicate();
7439 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7440
7441 Value *UAddOv;
7442 Value *A, *B;
7443 auto UAddOvResultPat = m_ExtractValue<0>(
7444 m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
7445 if (match(Op0, UAddOvResultPat) &&
7446 ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
7447 (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
7448 (match(A, m_One()) || match(B, m_One()))) ||
7449 (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
7450 (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
7451 // extract(uadd.with.overflow(A, B), 0) < A
7452 // extract(uadd.with.overflow(A, 1), 0) == 0
7453 // extract(uadd.with.overflow(A, -1), 0) != -1
7454 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
7455 else if (match(Op1, UAddOvResultPat) && Pred == ICmpInst::ICMP_UGT &&
7456 (Op0 == A || Op0 == B))
7457 // A > extract(uadd.with.overflow(A, B), 0)
7458 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
7459 else
7460 return nullptr;
7461
7462 return ExtractValueInst::Create(UAddOv, 1);
7463}
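
// Exhaustive i8 check (ours, illustrative only) of the equivalence used above:
// the wrapped sum of an unsigned add is u< either operand exactly when the
// add overflowed.
static void uaddOvFoldExample() {
  for (unsigned VA = 0; VA != 256; ++VA)
    for (unsigned VB = 0; VB != 256; ++VB) {
      APInt A(8, VA), B(8, VB);
      bool Overflow;
      APInt Sum = A.uadd_ov(B, Overflow);
      assert(Sum.ult(A) == Overflow);
    }
}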
7464
7465 static Instruction *foldICmpInvariantGroup(ICmpInst &I) {
7466 if (!I.getOperand(0)->getType()->isPointerTy() ||
7467 NullPointerIsDefined(
7468 I.getParent()->getParent(),
7469 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7470 return nullptr;
7471 }
7472 Instruction *Op;
7473 if (match(I.getOperand(0), m_Instruction(Op)) &&
7474 match(I.getOperand(1), m_Zero()) &&
7475 Op->isLaunderOrStripInvariantGroup()) {
7476 return ICmpInst::Create(Instruction::ICmp, I.getPredicate(),
7477 Op->getOperand(0), I.getOperand(1));
7478 }
7479 return nullptr;
7480}
7481
7482/// This function folds patterns produced by lowering of reduce idioms, such as
7483/// llvm.vector.reduce.and which are lowered into instruction chains. This code
7484 /// attempts to generate a smaller number of scalar comparisons instead of vector
7485/// comparisons when possible.
7486 static Instruction *foldReductionIdiom(ICmpInst &I,
7487 InstCombiner::BuilderTy &Builder,
7488 const DataLayout &DL) {
7489 if (I.getType()->isVectorTy())
7490 return nullptr;
7491 CmpPredicate OuterPred, InnerPred;
7492 Value *LHS, *RHS;
7493
7494 // Match lowering of @llvm.vector.reduce.and. Turn
7495 /// %vec_ne = icmp ne <8 x i8> %lhs, %rhs
7496 /// %scalar_ne = bitcast <8 x i1> %vec_ne to i8
7497 /// %res = icmp <pred> i8 %scalar_ne, 0
7498 ///
7499 /// into
7500 ///
7501 /// %lhs.scalar = bitcast <8 x i8> %lhs to i64
7502 /// %rhs.scalar = bitcast <8 x i8> %rhs to i64
7503 /// %res = icmp <pred> i64 %lhs.scalar, %rhs.scalar
7504 ///
7505 /// for <pred> in {ne, eq}.
7506 if (!match(&I, m_ICmp(OuterPred,
7507 m_OneUse(m_BitCast(m_OneUse(
7508 m_ICmp(InnerPred, m_Value(LHS), m_Value(RHS))))),
7509 m_Zero())))
7510 return nullptr;
7511 auto *LHSTy = dyn_cast<FixedVectorType>(LHS->getType());
7512 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7513 return nullptr;
7514 unsigned NumBits =
7515 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7516 // TODO: Relax this to "not wider than max legal integer type"?
7517 if (!DL.isLegalInteger(NumBits))
7518 return nullptr;
7519
7520 if (ICmpInst::isEquality(OuterPred) && InnerPred == ICmpInst::ICMP_NE) {
7521 auto *ScalarTy = Builder.getIntNTy(NumBits);
7522 LHS = Builder.CreateBitCast(LHS, ScalarTy, LHS->getName() + ".scalar");
7523 RHS = Builder.CreateBitCast(RHS, ScalarTy, RHS->getName() + ".scalar");
7524 return ICmpInst::Create(Instruction::ICmp, OuterPred, LHS, RHS,
7525 I.getName());
7526 }
7527
7528 return nullptr;
7529}
7530
7531// This helper will be called with icmp operands in both orders.
7532 Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred,
7533 Value *Op0, Value *Op1,
7534 ICmpInst &CxtI) {
7535 // Try to optimize 'icmp GEP, P' or 'icmp P, GEP'.
7536 if (auto *GEP = dyn_cast<GEPOperator>(Op0))
7537 if (Instruction *NI = foldGEPICmp(GEP, Op1, Pred, CxtI))
7538 return NI;
7539
7540 if (auto *SI = dyn_cast<SelectInst>(Op0))
7541 if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI))
7542 return NI;
7543
7544 if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0)) {
7545 if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred))
7546 return Res;
7547
7548 if (Instruction *Res = foldICmpWithClamp(CxtI, Op1, MinMax))
7549 return Res;
7550 }
7551
7552 {
7553 Value *X;
7554 const APInt *C;
7555 // icmp X+Cst, X
7556 if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
7557 return foldICmpAddOpConst(X, *C, Pred);
7558 }
7559
7560 // abs(X) >= X --> true
7561 // abs(X) u<= X --> true
7562 // abs(X) < X --> false
7563 // abs(X) u> X --> false
7564 // abs(X) u>= X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7565 // abs(X) <= X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7566 // abs(X) == X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7567 // abs(X) u< X --> IsIntMinPoison ? `X < 0` : `X u> INTMIN`
7568 // abs(X) > X --> IsIntMinPoison ? `X < 0` : `X u> INTMIN`
7569 // abs(X) != X --> IsIntMinPoison ? `X < 0` : `X u> INTMIN`
7570 {
7571 Value *X;
7572 Constant *C;
7573 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X), m_Constant(C))) &&
7574 match(Op1, m_Specific(X))) {
7575 Value *NullValue = Constant::getNullValue(X->getType());
7576 Value *AllOnesValue = Constant::getAllOnesValue(X->getType());
7577 const APInt SMin =
7578 APInt::getSignedMinValue(X->getType()->getScalarSizeInBits());
7579 bool IsIntMinPoison = C->isAllOnesValue();
7580 switch (Pred) {
7581 case CmpInst::ICMP_ULE:
7582 case CmpInst::ICMP_SGE:
7583 return replaceInstUsesWith(CxtI, ConstantInt::getTrue(CxtI.getType()));
7584 case CmpInst::ICMP_UGT:
7585 case CmpInst::ICMP_SLT:
7586 return replaceInstUsesWith(CxtI, ConstantInt::getFalse(CxtI.getType()));
7587 case CmpInst::ICMP_UGE:
7588 case CmpInst::ICMP_SLE:
7589 case CmpInst::ICMP_EQ: {
7590 return replaceInstUsesWith(
7591 CxtI, IsIntMinPoison
7592 ? Builder.CreateICmpSGT(X, AllOnesValue)
7593 : Builder.CreateICmpULT(
7594 X, ConstantInt::get(X->getType(), SMin + 1)));
7595 }
7596 case CmpInst::ICMP_ULT:
7597 case CmpInst::ICMP_SGT:
7598 case CmpInst::ICMP_NE: {
7599 return replaceInstUsesWith(
7600 CxtI, IsIntMinPoison
7601 ? Builder.CreateICmpSLT(X, NullValue)
7602 : Builder.CreateICmpUGT(
7603 X, ConstantInt::get(X->getType(), SMin)));
7604 }
7605 default:
7606 llvm_unreachable("Invalid predicate!");
7607 }
7608 }
7609 }
7610
7611 const SimplifyQuery Q = SQ.getWithInstruction(&CxtI);
7612 if (Value *V = foldICmpWithLowBitMaskedVal(Pred, Op0, Op1, Q, *this))
7613 return replaceInstUsesWith(CxtI, V);
7614
7615 // Folding (X / Y) pred X => X swap(pred) 0 for constant Y other than 0 or 1
7616 auto CheckUGT1 = [](const APInt &Divisor) { return Divisor.ugt(1); };
7617 {
7618 if (match(Op0, m_UDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7619 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7620 Constant::getNullValue(Op1->getType()));
7621 }
7622
7623 if (!ICmpInst::isUnsigned(Pred) &&
7624 match(Op0, m_SDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7625 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7626 Constant::getNullValue(Op1->getType()));
7627 }
7628 }
7629
7630 // Another case of this fold is (X >> Y) pred X => X swap(pred) 0 if Y != 0
7631 auto CheckNE0 = [](const APInt &Shift) { return !Shift.isZero(); };
7632 {
7633 if (match(Op0, m_LShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7634 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7635 Constant::getNullValue(Op1->getType()));
7636 }
7637
7638 if ((Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SGE) &&
7639 match(Op0, m_AShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7640 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7641 Constant::getNullValue(Op1->getType()));
7642 }
7643 }
7644
7645 return nullptr;
7646}
7647
7648 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
7649 bool Changed = false;
7650 const SimplifyQuery Q = SQ.getWithInstruction(&I);
7651 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7652 unsigned Op0Cplxity = getComplexity(Op0);
7653 unsigned Op1Cplxity = getComplexity(Op1);
7654
7655 /// Orders the operands of the compare so that they are listed from most
7656 /// complex to least complex. This puts constants before unary operators,
7657 /// before binary operators.
7658 if (Op0Cplxity < Op1Cplxity) {
7659 I.swapOperands();
7660 std::swap(Op0, Op1);
7661 Changed = true;
7662 }
7663
7664 if (Value *V = simplifyICmpInst(I.getCmpPredicate(), Op0, Op1, Q))
7665 return replaceInstUsesWith(I, V);
7666
7667 // Comparing -val or val with non-zero is the same as just comparing val
7668 // i.e., abs(val) != 0 -> val != 0
7669 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
7670 Value *Cond, *SelectTrue, *SelectFalse;
7671 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
7672 m_Value(SelectFalse)))) {
7673 if (Value *V = dyn_castNegVal(SelectTrue)) {
7674 if (V == SelectFalse)
7675 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7676 } else if (Value *V = dyn_castNegVal(SelectFalse)) {
7677 if (V == SelectTrue)
7678 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7679 }
7680 }
7681 }
7682
7683 if (Instruction *Res = canonicalizeCmpWithConstant(I))
7684 return Res;
7685
7686 if (Op0->getType()->isIntOrIntVectorTy(1))
7687 if (Instruction *Res = canonicalizeICmpBool(I, Builder))
7688 return Res;
7689
7690 if (Instruction *Res = canonicalizeICmpPredicate(I))
7691 return Res;
7692
7693 if (Instruction *Res = foldICmpWithConstant(I))
7694 return Res;
7695
7696 if (Instruction *Res = foldICmpWithDominatingICmp(I))
7697 return Res;
7698
7699 if (Instruction *Res = foldICmpUsingBoolRange(I))
7700 return Res;
7701
7702 if (Instruction *Res = foldICmpUsingKnownBits(I))
7703 return Res;
7704
7705 if (Instruction *Res = foldICmpTruncWithTruncOrExt(I, Q))
7706 return Res;
7707
7709 return Res;
7710
7711 // Test if the ICmpInst instruction is used exclusively by a select as
7712 // part of a minimum or maximum operation. If so, refrain from doing
7713 // any other folding. This helps out other analyses which understand
7714 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
7715 // and CodeGen. And in this case, at least one of the comparison
7716 // operands has at least one user besides the compare (the select),
7717 // which would often largely negate the benefit of folding anyway.
7718 //
7719 // Do the same for the other patterns recognized by matchSelectPattern.
7720 if (I.hasOneUse())
7721 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
7722 Value *A, *B;
7723 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
7724 if (SPR.Flavor != SPF_UNKNOWN)
7725 return nullptr;
7726 }
7727
7728 // Do this after checking for min/max to prevent infinite looping.
7729 if (Instruction *Res = foldICmpWithZero(I))
7730 return Res;
7731
7732 // FIXME: We only do this after checking for min/max to prevent infinite
7733 // looping caused by a reverse canonicalization of these patterns for min/max.
7734 // FIXME: The organization of folds is a mess. These would naturally go into
7735 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
7736 // down here after the min/max restriction.
7737 ICmpInst::Predicate Pred = I.getPredicate();
7738 const APInt *C;
7739 if (match(Op1, m_APInt(C))) {
7740 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
7741 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
7742 Constant *Zero = Constant::getNullValue(Op0->getType());
7743 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
7744 }
7745
7746 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
7747 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
7748 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
7749 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
7750 }
7751 }
7752
7753 // The folds in here may rely on wrapping flags and special constants, so
7754 // they can break up min/max idioms in some cases but not seemingly similar
7755 // patterns.
7756 // FIXME: It may be possible to enhance select folding to make this
7757 // unnecessary. It may also be moot if we canonicalize to min/max
7758 // intrinsics.
7759 if (Instruction *Res = foldICmpBinOp(I, Q))
7760 return Res;
7761
7762 if (Instruction *Res = foldICmpInstWithConstant(I))
7763 return Res;
7764
7765 // Try to match comparison as a sign bit test. Intentionally do this after
7766 // foldICmpInstWithConstant() to potentially let other folds happen first.
7767 if (Instruction *New = foldSignBitTest(I))
7768 return New;
7769
7770 if (auto *PN = dyn_cast<PHINode>(Op0))
7771 if (Instruction *NV = foldOpIntoPhi(I, PN))
7772 return NV;
7773 if (auto *PN = dyn_cast<PHINode>(Op1))
7774 if (Instruction *NV = foldOpIntoPhi(I, PN))
7775 return NV;
7776
7777 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
7778 return Res;
7779
7780 if (Instruction *Res = foldICmpCommutative(I.getCmpPredicate(), Op0, Op1, I))
7781 return Res;
7782 if (Instruction *Res =
7783 foldICmpCommutative(I.getSwappedCmpPredicate(), Op1, Op0, I))
7784 return Res;
7785
7786 if (I.isCommutative()) {
7787 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
7788 replaceOperand(I, 0, Pair->first);
7789 replaceOperand(I, 1, Pair->second);
7790 return &I;
7791 }
7792 }
7793
7794 // In case of a comparison with two select instructions having the same
7795 // condition, check whether one of the resulting branches can be simplified.
7796 // If so, just compare the other branch and select the appropriate result.
7797 // For example:
7798 // %tmp1 = select i1 %cmp, i32 %y, i32 %x
7799 // %tmp2 = select i1 %cmp, i32 %z, i32 %x
7800 // %cmp2 = icmp slt i32 %tmp2, %tmp1
7801 // The icmp will result false for the false value of selects and the result
7802 // will depend upon the comparison of true values of selects if %cmp is
7803 // true. Thus, transform this into:
7804 // %cmp = icmp slt i32 %y, %z
7805 // %sel = select i1 %cond, i1 %cmp, i1 false
7806 // Similar cases are handled in the same way by this transform.
7807 {
7808 Value *Cond, *A, *B, *C, *D;
7809 if (match(Op0, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
7811 (Op0->hasOneUse() || Op1->hasOneUse())) {
7812 // Check whether comparison of TrueValues can be simplified
7813 if (Value *Res = simplifyICmpInst(Pred, A, C, SQ)) {
7814 Value *NewICMP = Builder.CreateICmp(Pred, B, D);
7815 return SelectInst::Create(Cond, Res, NewICMP);
7816 }
7817 // Check whether comparison of FalseValues can be simplified
7818 if (Value *Res = simplifyICmpInst(Pred, B, D, SQ)) {
7819 Value *NewICMP = Builder.CreateICmp(Pred, A, C);
7820 return SelectInst::Create(Cond, NewICMP, Res);
7821 }
7822 }
7823 }
7824
7825 // icmp slt (sub nsw x, y), (add nsw x, y) --> icmp sgt y, 0
7826 // icmp ult (sub nuw x, y), (add nuw x, y) --> icmp ugt y, 0
7827 // icmp eq (sub nsw/nuw x, y), (add nsw/nuw x, y) --> icmp eq y, 0
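// Worked example (hypothetical IR): with nsw on both operands,
//   %s = sub nsw i32 %x, %y
//   %a = add nsw i32 %x, %y
//   %c = icmp slt i32 %s, %a
// folds to %c = icmp sgt i32 %y, 0, since x - y < x + y iff 0 < 2 * y.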
7828 {
7829 Value *A, *B;
7830 CmpPredicate CmpPred;
7831 if (match(&I, m_c_ICmp(CmpPred, m_Sub(m_Value(A), m_Value(B)),
7832 m_c_Add(m_Deferred(A), m_Deferred(B))))) {
7833 auto *I0 = cast<OverflowingBinaryOperator>(Op0);
7834 auto *I1 = cast<OverflowingBinaryOperator>(Op1);
7835 bool I0NUW = I0->hasNoUnsignedWrap();
7836 bool I1NUW = I1->hasNoUnsignedWrap();
7837 bool I0NSW = I0->hasNoSignedWrap();
7838 bool I1NSW = I1->hasNoSignedWrap();
7839 if ((ICmpInst::isUnsigned(Pred) && I0NUW && I1NUW) ||
7840 (ICmpInst::isSigned(Pred) && I0NSW && I1NSW) ||
7841 (ICmpInst::isEquality(Pred) &&
7842 ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7843 return new ICmpInst(CmpPredicate::getSwapped(CmpPred), B,
7844 ConstantInt::get(Op0->getType(), 0));
7845 }
7846 }
7847 }
7848
7849 // Try to optimize equality comparisons against alloca-based pointers.
7850 if (Op0->getType()->isPointerTy() && I.isEquality()) {
7851 assert(Op1->getType()->isPointerTy() &&
7852 "Comparing pointer with non-pointer?");
7853 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
7854 if (foldAllocaCmp(Alloca))
7855 return nullptr;
7856 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
7857 if (foldAllocaCmp(Alloca))
7858 return nullptr;
7859 }
7860
7861 if (Instruction *Res = foldICmpBitCast(I))
7862 return Res;
7863
7864 // TODO: Hoist this above the min/max bailout.
7865 if (Instruction *R = foldICmpWithDominatingICmp(I))
7866 return R;
7867
7868 {
7869 Value *X, *Y;
7870 // Transform (X & ~Y) == 0 --> (X & Y) != 0
7871 // and (X & ~Y) != 0 --> (X & Y) == 0
7872 // if X is a power of 2.
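// Worked example: if X = 8 (bit 3), then (8 & ~Y) == 0 holds exactly when
// bit 3 of Y is set, i.e. exactly when (8 & Y) != 0.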
7873 if (match(Op0, m_And(m_Value(X), m_Not(m_Value(Y)))) &&
7874 match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, &I) &&
7875 I.isEquality())
7876 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
7877 Op1);
7878
7879 // Op0 pred Op1 -> ~Op1 pred ~Op0, if this allows us to drop an instruction.
7880 if (Op0->getType()->isIntOrIntVectorTy()) {
7881 bool ConsumesOp0, ConsumesOp1;
7882 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
7883 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
7884 (ConsumesOp0 || ConsumesOp1)) {
7885 Value *InvOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
7886 Value *InvOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
7887 assert(InvOp0 && InvOp1 &&
7888 "Mismatch between isFreeToInvert and getFreelyInverted");
7889 return new ICmpInst(I.getSwappedPredicate(), InvOp0, InvOp1);
7890 }
7891 }
7892
7893 Instruction *AddI = nullptr;
7894 if (match(&I, m_UAddWithOverflow(m_Value(X), m_Value(Y),
7895 m_Instruction(AddI))) &&
7896 isa<IntegerType>(X->getType())) {
7897 Value *Result;
7898 Constant *Overflow;
7899 // m_UAddWithOverflow can match patterns that do not include an explicit
7900 // "add" instruction, so check the opcode of the matched op.
7901 if (AddI->getOpcode() == Instruction::Add &&
7902 OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, X, Y, *AddI,
7903 Result, Overflow)) {
7904 replaceInstUsesWith(*AddI, Result);
7905 eraseInstFromFunction(*AddI);
7906 return replaceInstUsesWith(I, Overflow);
7907 }
7908 }
7909
7910 // (zext X) * (zext Y) --> llvm.umul.with.overflow.
7911 if (match(Op0, m_NUWMul(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y)))) &&
7912 match(Op1, m_APInt(C))) {
7913 if (Instruction *R = processUMulZExtIdiom(I, Op0, C, *this))
7914 return R;
7915 }
7916
7917 // Signbit test folds
7918 // Fold (X u>> BitWidth - 1 Pred ZExt(i1)) --> X s< 0 Pred i1
7919 // Fold (X s>> BitWidth - 1 Pred SExt(i1)) --> X s< 0 Pred i1
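// Worked example (hypothetical IR) of the zext form at i32:
//   %sh = lshr i32 %x, 31
//   %zy = zext i1 %y to i32
//   %c = icmp eq i32 %sh, %zy
// becomes
//   %lt = icmp slt i32 %x, 0
//   %c = icmp eq i1 %lt, %y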
7920 Instruction *ExtI;
7921 if ((I.isUnsigned() || I.isEquality()) &&
7922 match(Op1,
7923 m_CombineAnd(m_Instruction(ExtI), m_ZExtOrSExt(m_Value(Y)))) &&
7924 Y->getType()->getScalarSizeInBits() == 1 &&
7925 (Op0->hasOneUse() || Op1->hasOneUse())) {
7926 unsigned OpWidth = Op0->getType()->getScalarSizeInBits();
7927 Instruction *ShiftI;
7928 if (match(Op0, m_CombineAnd(m_Instruction(ShiftI),
7929 m_Shr(m_Value(X), m_SpecificIntAllowPoison(
7930 OpWidth - 1))))) {
7931 unsigned ExtOpc = ExtI->getOpcode();
7932 unsigned ShiftOpc = ShiftI->getOpcode();
7933 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
7934 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
7935 Value *SLTZero =
7936 Builder.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
7937 Value *Cmp = Builder.CreateICmp(Pred, SLTZero, Y, I.getName());
7938 return replaceInstUsesWith(I, Cmp);
7939 }
7940 }
7941 }
7942 }
7943
7944 if (Instruction *Res = foldICmpEquality(I))
7945 return Res;
7946
7947 if (Instruction *Res = foldICmpPow2Test(I, Builder))
7948 return Res;
7949
7950 if (Instruction *Res = foldICmpOfUAddOv(I))
7951 return Res;
7952
7953 // The 'cmpxchg' instruction returns an aggregate containing the old value and
7954 // an i1 which indicates whether or not we successfully did the swap.
7955 //
7956 // Replace comparisons between the old value and the expected value with the
7957 // indicator that 'cmpxchg' returns.
7958 //
7959 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
7960 // spuriously fail. In those cases, the old value may equal the expected
7961 // value but it is possible for the swap to not occur.
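// Worked example (hypothetical IR), valid only for a strong cmpxchg:
//   %pair = cmpxchg ptr %p, i32 %old, i32 %new seq_cst seq_cst
//   %val = extractvalue { i32, i1 } %pair, 0
//   %ok = icmp eq i32 %val, %old
// becomes
//   %ok = extractvalue { i32, i1 } %pair, 1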
7962 if (I.getPredicate() == ICmpInst::ICMP_EQ)
7963 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
7964 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
7965 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
7966 !ACXI->isWeak())
7967 return ExtractValueInst::Create(ACXI, 1);
7968
7969 if (Instruction *Res = foldICmpInvariantGroup(I))
7970 return Res;
7971
7972 if (I.getType()->isVectorTy())
7973 if (Instruction *Res = foldVectorCmp(I, Builder))
7974 return Res;
7975
7976 if (Instruction *Res = foldReductionIdiom(I, Builder, DL))
7977 return Res;
7978
7979 if (Instruction *Res = foldICmpEqualityWithOffset(I, Builder, Q))
7980 return Res;
7981
7982 {
7983 Value *A;
7984 const APInt *C1, *C2;
7985 ICmpInst::Predicate Pred = I.getPredicate();
7986 if (ICmpInst::isEquality(Pred)) {
7987 // sext(a) & c1 == c2 --> a & c3 == trunc(c2)
7988 // sext(a) & c1 != c2 --> a & c3 != trunc(c2)
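// Worked example (hypothetical IR) with a = i8, c1 = 0x100, c2 = 0:
//   %e = sext i8 %a to i32
//   %m = and i32 %e, 256
//   %c = icmp eq i32 %m, 0
// Bit 8 of %e is a copy of the sign bit of %a, so c3 = 0x80 and this becomes
//   %m2 = and i8 %a, -128
//   %c = icmp eq i8 %m2, 0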
7989 if (match(Op0, m_And(m_SExt(m_Value(A)), m_APInt(C1))) &&
7990 match(Op1, m_APInt(C2))) {
7991 Type *InputTy = A->getType();
7992 unsigned InputBitWidth = InputTy->getScalarSizeInBits();
7993 // c2 must be non-negative at the bitwidth of a.
7994 if (C2->getActiveBits() < InputBitWidth) {
7995 APInt TruncC1 = C1->trunc(InputBitWidth);
7996 // Bits of C1 at or above InputBitWidth test copies of a's sign bit.
7997 if (C1->uge(APInt::getOneBitSet(C1->getBitWidth(), InputBitWidth)))
7998 TruncC1.setBit(InputBitWidth - 1);
7999 Value *AndInst = Builder.CreateAnd(A, TruncC1);
8000 return new ICmpInst(
8001 Pred, AndInst,
8002 ConstantInt::get(InputTy, C2->trunc(InputBitWidth)));
8003 }
8004 }
8005 }
8006 }
8007
8008 return Changed ? &I : nullptr;
8009}
8010
8011/// Fold fcmp ([us]itofp x, cst) if possible.
8012 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
8013 Instruction *LHSI,
8014 Constant *RHSC) {
8015 const APFloat *RHS;
8016 if (!match(RHSC, m_APFloat(RHS)))
8017 return nullptr;
8018
8019 // Get the width of the mantissa. We don't want to hack on conversions that
8020 // might lose information from the integer, e.g. "i64 -> float"
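// For example, float has a 24-bit mantissa, so (float)16777217 (2^24 + 1)
// rounds to 16777216.0 and an i32 -> float conversion can change the result
// of a comparison; an i16 -> float conversion, by contrast, is always exact.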
8021 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
8022 if (MantissaWidth == -1)
8023 return nullptr; // Unknown.
8024
8025 Type *IntTy = LHSI->getOperand(0)->getType();
8026 unsigned IntWidth = IntTy->getScalarSizeInBits();
8027 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
8028
8029 if (I.isEquality()) {
8030 FCmpInst::Predicate P = I.getPredicate();
8031 bool IsExact = false;
8032 APSInt RHSCvt(IntWidth, LHSUnsigned);
8033 RHS->convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
8034
8035 // If the floating-point constant isn't an integer value, we can resolve
8036 // equality and inequality comparisons against it immediately.
8037 if (!IsExact) {
8038 // TODO: Can never be -0.0 and other non-representable values
8039 APFloat RHSRoundInt(*RHS);
8040 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
8041 if (*RHS != RHSRoundInt) {
8042 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
8043 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8044
8045 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
8046 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8047 }
8048 }
8049
8050 // TODO: If the constant is exactly representable, is it always OK to do
8051 // equality compares as integer?
8052 }
8053
8054 // Check that the input is converted from an integer type small enough that
8055 // the conversion preserves all bits. TODO: check here for "known" sign bits.
8056 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64, e.g.
8057
8058 // The following test does NOT adjust IntWidth downwards for signed inputs,
8059 // because the most negative value still requires all the mantissa bits
8060 // to distinguish it from one less than that value.
8061 if ((int)IntWidth > MantissaWidth) {
8062 // Conversion would lose accuracy. Check if loss can impact comparison.
8063 int Exp = ilogb(*RHS);
8064 if (Exp == APFloat::IEK_Inf) {
8065 int MaxExponent = ilogb(APFloat::getLargest(RHS->getSemantics()));
8066 if (MaxExponent < (int)IntWidth - !LHSUnsigned)
8067 // Conversion could create infinity.
8068 return nullptr;
8069 } else {
8070 // Note that if RHS is zero or NaN, then Exp is negative
8071 // and first condition is trivially false.
8072 if (MantissaWidth <= Exp && Exp <= (int)IntWidth - !LHSUnsigned)
8073 // Conversion could affect comparison.
8074 return nullptr;
8075 }
8076 }
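// Worked example of the bail-out above: for an unsigned i32 input and float
// (MantissaWidth == 24), the constant 33554428.0 (2^25 - 4) has Exp == 24,
// and 33554427, 33554428, and 33554429 all convert to it, so an exact
// integer comparison cannot be formed.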
8077
8078 // Otherwise, we can potentially simplify the comparison. We know that it
8079 // will always come through as an integer value and we know the constant is
8080 // not a NAN (it would have been previously simplified).
8081 assert(!RHS->isNaN() && "NaN comparison not already folded!");
8082
8083 ICmpInst::Predicate Pred;
8084 switch (I.getPredicate()) {
8085 default:
8086 llvm_unreachable("Unexpected predicate!");
8087 case FCmpInst::FCMP_UEQ:
8088 case FCmpInst::FCMP_OEQ:
8089 Pred = ICmpInst::ICMP_EQ;
8090 break;
8091 case FCmpInst::FCMP_UGT:
8092 case FCmpInst::FCMP_OGT:
8093 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
8094 break;
8095 case FCmpInst::FCMP_UGE:
8096 case FCmpInst::FCMP_OGE:
8097 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
8098 break;
8099 case FCmpInst::FCMP_ULT:
8100 case FCmpInst::FCMP_OLT:
8101 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
8102 break;
8103 case FCmpInst::FCMP_ULE:
8104 case FCmpInst::FCMP_OLE:
8105 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
8106 break;
8107 case FCmpInst::FCMP_UNE:
8108 case FCmpInst::FCMP_ONE:
8109 Pred = ICmpInst::ICMP_NE;
8110 break;
8111 case FCmpInst::FCMP_ORD:
8112 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8113 case FCmpInst::FCMP_UNO:
8114 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8115 }
8116
8117 // Now we know that the APFloat is a normal number, zero or inf.
8118
8119 // See if the FP constant is too large for the integer. For example,
8120 // comparing an i8 to 300.0.
8121 if (!LHSUnsigned) {
8122 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
8123 // and large values.
8124 APFloat SMax(RHS->getSemantics());
8125 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
8126 APFloat::rmNearestTiesToEven);
8127 if (SMax < *RHS) { // smax < 13123.0
8128 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
8129 Pred == ICmpInst::ICMP_SLE)
8130 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8131 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8132 }
8133 } else {
8134 // If the RHS value is > UnsignedMax, fold the comparison. This handles
8135 // +INF and large values.
8136 APFloat UMax(RHS->getSemantics());
8137 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
8138 APFloat::rmNearestTiesToEven);
8139 if (UMax < *RHS) { // umax < 13123.0
8140 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
8141 Pred == ICmpInst::ICMP_ULE)
8142 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8143 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8144 }
8145 }
8146
8147 if (!LHSUnsigned) {
8148 // See if the RHS value is < SignedMin.
8149 APFloat SMin(RHS->getSemantics());
8150 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
8151 APFloat::rmNearestTiesToEven);
8152 if (SMin > *RHS) { // smin > 12312.0
8153 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
8154 Pred == ICmpInst::ICMP_SGE)
8155 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8156 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8157 }
8158 } else {
8159 // See if the RHS value is < UnsignedMin.
8160 APFloat UMin(RHS->getSemantics());
8161 UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
8162 APFloat::rmNearestTiesToEven);
8163 if (UMin > *RHS) { // umin > 12312.0
8164 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
8165 Pred == ICmpInst::ICMP_UGE)
8166 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8167 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8168 }
8169 }
8170
8171 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
8172 // [0, UMAX], but it may still be fractional. Check whether this is the case
8173 // using the IsExact flag.
8174 // Don't do this for zero, because -0.0 is not fractional.
8175 APSInt RHSInt(IntWidth, LHSUnsigned);
8176 bool IsExact;
8177 RHS->convertToInteger(RHSInt, APFloat::rmTowardZero, &IsExact);
8178 if (!RHS->isZero()) {
8179 if (!IsExact) {
8180 // If we had a comparison against a fractional value, we have to adjust
8181 // the compare predicate and sometimes the value. RHSC is rounded towards
8182 // zero at this point.
8183 switch (Pred) {
8184 default:
8185 llvm_unreachable("Unexpected integer comparison!");
8186 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
8187 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8188 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
8189 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8190 case ICmpInst::ICMP_ULE:
8191 // (float)int <= 4.4 --> int <= 4
8192 // (float)int <= -4.4 --> false
8193 if (RHS->isNegative())
8194 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8195 break;
8196 case ICmpInst::ICMP_SLE:
8197 // (float)int <= 4.4 --> int <= 4
8198 // (float)int <= -4.4 --> int < -4
8199 if (RHS->isNegative())
8200 Pred = ICmpInst::ICMP_SLT;
8201 break;
8202 case ICmpInst::ICMP_ULT:
8203 // (float)int < -4.4 --> false
8204 // (float)int < 4.4 --> int <= 4
8205 if (RHS->isNegative())
8206 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8207 Pred = ICmpInst::ICMP_ULE;
8208 break;
8209 case ICmpInst::ICMP_SLT:
8210 // (float)int < -4.4 --> int < -4
8211 // (float)int < 4.4 --> int <= 4
8212 if (!RHS->isNegative())
8213 Pred = ICmpInst::ICMP_SLE;
8214 break;
8215 case ICmpInst::ICMP_UGT:
8216 // (float)int > 4.4 --> int > 4
8217 // (float)int > -4.4 --> true
8218 if (RHS->isNegative())
8219 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8220 break;
8221 case ICmpInst::ICMP_SGT:
8222 // (float)int > 4.4 --> int > 4
8223 // (float)int > -4.4 --> int >= -4
8224 if (RHS->isNegative())
8225 Pred = ICmpInst::ICMP_SGE;
8226 break;
8227 case ICmpInst::ICMP_UGE:
8228 // (float)int >= -4.4 --> true
8229 // (float)int >= 4.4 --> int > 4
8230 if (RHS->isNegative())
8231 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8232 Pred = ICmpInst::ICMP_UGT;
8233 break;
8234 case ICmpInst::ICMP_SGE:
8235 // (float)int >= -4.4 --> int >= -4
8236 // (float)int >= 4.4 --> int > 4
8237 if (!RHS->isNegative())
8238 Pred = ICmpInst::ICMP_SGT;
8239 break;
8240 }
8241 }
8242 }
8243
8244 // Lower this FP comparison into an appropriate integer version of the
8245 // comparison.
8246 return new ICmpInst(Pred, LHSI->getOperand(0),
8247 ConstantInt::get(LHSI->getOperand(0)->getType(), RHSInt));
8248}
8249
8250/// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
8251 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
8252 Constant *RHSC) {
8253 // When C is not 0.0 and infinities are not allowed:
8254 // (C / X) < 0.0 is a sign-bit test of X
8255 // (C / X) < 0.0 --> X < 0.0 (if C is positive)
8256 // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
8257 //
8258 // Proof:
8259 // Multiply (C / X) < 0.0 by X * X / C.
8260 // - X is non-zero: if X were zero, C / X would be infinite, violating 'ninf'.
8261 // - C determines the sign of X * X / C, and thus whether the predicate must
8262 // be swapped. C is also non-zero by definition.
8263 //
8264 // Thus X * X / C is non-zero and the transformation is valid. [qed]
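// For example (under 'ninf'): (2.0 / X) < 0.0 iff X < 0.0, while
// (-2.0 / X) < 0.0 iff X > 0.0, i.e. the predicate is swapped when C < 0.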
8265
8266 FCmpInst::Predicate Pred = I.getPredicate();
8267
8268 // Check that predicates are valid.
8269 if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
8270 (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
8271 return nullptr;
8272
8273 // Check that RHS operand is zero.
8274 if (!match(RHSC, m_AnyZeroFP()))
8275 return nullptr;
8276
8277 // Check fastmath flags ('ninf').
8278 if (!LHSI->hasNoInfs() || !I.hasNoInfs())
8279 return nullptr;
8280
8281 // Check the properties of the dividend. It must not be zero to avoid a
8282 // division by zero (see Proof).
8283 const APFloat *C;
8284 if (!match(LHSI->getOperand(0), m_APFloat(C)))
8285 return nullptr;
8286
8287 if (C->isZero())
8288 return nullptr;
8289
8290 // Get swapped predicate if necessary.
8291 if (C->isNegative())
8292 Pred = I.getSwappedPredicate();
8293
8294 return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
8295}
8296
8297// Transform 'fptrunc(x) cmp C' to 'x cmp ext(C)' if possible.
8298// Patterns include:
8299// fptrunc(x) < C --> x < ext(C)
8300// fptrunc(x) <= C --> x <= ext(C)
8301// fptrunc(x) > C --> x > ext(C)
8302// fptrunc(x) >= C --> x >= ext(C)
8303// where 'ext(C)' is the extension of 'C' to the type of 'x' with a small bias
8304// due to precision loss.
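// Illustrative sketch (hypothetical types): for
//   %t = fptrunc double %x to float
//   %c = fcmp olt float %t, 1.0
// the fold compares %x directly against a double chosen between 1.0f and the
// next float below it, so that %c is unchanged for every %x.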
8305 static Instruction *foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc,
8306 const Constant &C) {
8307 FCmpInst::Predicate Pred = I.getPredicate();
8308 bool RoundDown = false;
8309
8310 if (Pred == FCmpInst::FCMP_OGE || Pred == FCmpInst::FCMP_UGE ||
8311 Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_ULT)
8312 RoundDown = true;
8313 else if (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT ||
8314 Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)
8315 RoundDown = false;
8316 else
8317 return nullptr;
8318
8319 const APFloat *CValue;
8320 if (!match(&C, m_APFloat(CValue)))
8321 return nullptr;
8322
8323 if (CValue->isNaN() || CValue->isInfinity())
8324 return nullptr;
8325
8326 auto ConvertFltSema = [](const APFloat &Src, const fltSemantics &Sema) {
8327 bool LosesInfo;
8328 APFloat Dest = Src;
8329 Dest.convert(Sema, APFloat::rmNearestTiesToEven, &LosesInfo);
8330 return Dest;
8331 };
8332
8333 auto NextValue = [](const APFloat &Value, bool RoundDown) {
8334 APFloat NextValue = Value;
8335 NextValue.next(RoundDown);
8336 return NextValue;
8337 };
8338
8339 APFloat NextCValue = NextValue(*CValue, RoundDown);
8340
8341 Type *DestType = FPTrunc.getOperand(0)->getType();
8342 const fltSemantics &DestFltSema =
8343 DestType->getScalarType()->getFltSemantics();
8344
8345 APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
8346 APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
8347
8348 // When 'NextCValue' is infinity, use an imagined 'NextCValue' that equals
8349 // 'CValue + bias' to avoid the infinity after conversion. The bias is
8350 // estimated as 'CValue - PrevCValue', where 'PrevCValue' is the previous
8351 // value of 'CValue'.
8352 if (NextCValue.isInfinity()) {
8353 APFloat PrevCValue = NextValue(*CValue, !RoundDown);
8354 APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
8355
8356 ExtNextCValue = ExtCValue + Bias;
8357 }
8358
8359 APFloat ExtMidValue =
8360 scalbn(ExtCValue + ExtNextCValue, -1, APFloat::rmNearestTiesToEven);
8361
8362 const fltSemantics &SrcFltSema =
8363 C.getType()->getScalarType()->getFltSemantics();
8364
8365 // 'MidValue' might be rounded to 'NextCValue'. Correct it here.
8366 APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
8367 if (MidValue != *CValue)
8368 ExtMidValue.next(!RoundDown);
8369
8370 // Check whether 'ExtMidValue' is a valid result, since the assumption about
8371 // the imagined 'NextCValue' might not hold for the new float type.
8372 // ppc_fp128 cannot pass this check when converting from the largest float
8373 // value because of limitations in the APFloat implementation.
8374 if (NextCValue.isInfinity()) {
8375 // ExtMidValue --- narrowed ---> Finite
8376 if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())
8377 return nullptr;
8378
8379 // NextExtMidValue --- narrowed ---> Infinity
8380 APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
8381 if (ConvertFltSema(NextExtMidValue, SrcFltSema).isFinite())
8382 return nullptr;
8383 }
8384
8385 return new FCmpInst(Pred, FPTrunc.getOperand(0),
8386 ConstantFP::get(DestType, ExtMidValue), "", &I);
8387}
8388
8389/// Optimize fabs(X) compared with zero.
8390 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8391 Value *X;
8392 if (!match(I.getOperand(0), m_FAbs(m_Value(X))))
8393 return nullptr;
8394
8395 const APFloat *C;
8396 if (!match(I.getOperand(1), m_APFloat(C)))
8397 return nullptr;
8398
8399 if (!C->isPosZero()) {
8400 if (!C->isSmallestNormalized())
8401 return nullptr;
8402
8403 const Function *F = I.getFunction();
8404 DenormalMode Mode = F->getDenormalMode(C->getSemantics());
8405 if (Mode.Input == DenormalMode::PreserveSign ||
8406 Mode.Input == DenormalMode::PositiveZero) {
8407
8408 auto replaceFCmp = [](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8409 Constant *Zero = ConstantFP::getZero(X->getType());
8410 return new FCmpInst(P, X, Zero, "", I);
8411 };
8412
8413 switch (I.getPredicate()) {
8414 case FCmpInst::FCMP_OLT:
8415 // fcmp olt fabs(x), smallest_normalized_number -> fcmp oeq x, 0.0
8416 return replaceFCmp(&I, FCmpInst::FCMP_OEQ, X);
8417 case FCmpInst::FCMP_UGE:
8418 // fcmp uge fabs(x), smallest_normalized_number -> fcmp une x, 0.0
8419 return replaceFCmp(&I, FCmpInst::FCMP_UNE, X);
8420 case FCmpInst::FCMP_OGE:
8421 // fcmp oge fabs(x), smallest_normalized_number -> fcmp one x, 0.0
8422 return replaceFCmp(&I, FCmpInst::FCMP_ONE, X);
8423 case FCmpInst::FCMP_ULT:
8424 // fcmp ult fabs(x), smallest_normalized_number -> fcmp ueq x, 0.0
8425 return replaceFCmp(&I, FCmpInst::FCMP_UEQ, X);
8426 default:
8427 break;
8428 }
8429 }
8430
8431 return nullptr;
8432 }
8433
8434 auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8435 I->setPredicate(P);
8436 return IC.replaceOperand(*I, 0, X);
8437 };
8438
8439 switch (I.getPredicate()) {
8440 case FCmpInst::FCMP_UGE:
8441 case FCmpInst::FCMP_OLT:
8442 // fabs(X) >= 0.0 --> true
8443 // fabs(X) < 0.0 --> false
8444 llvm_unreachable("fcmp should have simplified");
8445
8446 case FCmpInst::FCMP_OGT:
8447 // fabs(X) > 0.0 --> X != 0.0
8448 return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
8449
8450 case FCmpInst::FCMP_UGT:
8451 // fabs(X) u> 0.0 --> X u!= 0.0
8452 return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
8453
8454 case FCmpInst::FCMP_OLE:
8455 // fabs(X) <= 0.0 --> X == 0.0
8456 return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
8457
8458 case FCmpInst::FCMP_ULE:
8459 // fabs(X) u<= 0.0 --> X u== 0.0
8460 return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
8461
8462 case FCmpInst::FCMP_OGE:
8463 // fabs(X) >= 0.0 --> !isnan(X)
8464 assert(!I.hasNoNaNs() && "fcmp should have simplified");
8465 return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
8466
8467 case FCmpInst::FCMP_ULT:
8468 // fabs(X) u< 0.0 --> isnan(X)
8469 assert(!I.hasNoNaNs() && "fcmp should have simplified");
8470 return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
8471
8472 case FCmpInst::FCMP_OEQ:
8473 case FCmpInst::FCMP_UEQ:
8474 case FCmpInst::FCMP_ONE:
8475 case FCmpInst::FCMP_UNE:
8476 case FCmpInst::FCMP_ORD:
8477 case FCmpInst::FCMP_UNO:
8478 // Look through the fabs() because it doesn't change anything but the sign.
8479 // fabs(X) == 0.0 --> X == 0.0,
8480 // fabs(X) != 0.0 --> X != 0.0
8481 // isnan(fabs(X)) --> isnan(X)
8482 // !isnan(fabs(X)) --> !isnan(X)
8483 return replacePredAndOp0(&I, I.getPredicate(), X);
8484
8485 default:
8486 return nullptr;
8487 }
8488}
8489
8490/// Optimize sqrt(X) compared with zero.
8491 static Instruction *foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8492 Value *X;
8493 if (!match(I.getOperand(0), m_Sqrt(m_Value(X))))
8494 return nullptr;
8495
8496 if (!match(I.getOperand(1), m_PosZeroFP()))
8497 return nullptr;
8498
8499 auto ReplacePredAndOp0 = [&](FCmpInst::Predicate P) {
8500 I.setPredicate(P);
8501 return IC.replaceOperand(I, 0, X);
8502 };
8503
8504 // Clear ninf flag if sqrt doesn't have it.
8505 if (!cast<Instruction>(I.getOperand(0))->hasNoInfs())
8506 I.setHasNoInfs(false);
8507
8508 switch (I.getPredicate()) {
8509 case FCmpInst::FCMP_OLT:
8510 case FCmpInst::FCMP_UGE:
8511 // sqrt(X) < 0.0 --> false
8512 // sqrt(X) u>= 0.0 --> true
8513 llvm_unreachable("fcmp should have simplified");
8514 case FCmpInst::FCMP_ULT:
8515 case FCmpInst::FCMP_ULE:
8516 case FCmpInst::FCMP_OGT:
8517 case FCmpInst::FCMP_OGE:
8518 case FCmpInst::FCMP_OEQ:
8519 case FCmpInst::FCMP_UNE:
8520 // sqrt(X) u< 0.0 --> X u< 0.0
8521 // sqrt(X) u<= 0.0 --> X u<= 0.0
8522 // sqrt(X) > 0.0 --> X > 0.0
8523 // sqrt(X) >= 0.0 --> X >= 0.0
8524 // sqrt(X) == 0.0 --> X == 0.0
8525 // sqrt(X) u!= 0.0 --> X u!= 0.0
8526 return IC.replaceOperand(I, 0, X);
8527
8528 case FCmpInst::FCMP_OLE:
8529 // sqrt(X) <= 0.0 --> X == 0.0
8530 return ReplacePredAndOp0(FCmpInst::FCMP_OEQ);
8531 case FCmpInst::FCMP_UGT:
8532 // sqrt(X) u> 0.0 --> X u!= 0.0
8533 return ReplacePredAndOp0(FCmpInst::FCMP_UNE);
8534 case FCmpInst::FCMP_UEQ:
8535 // sqrt(X) u== 0.0 --> X u<= 0.0
8536 return ReplacePredAndOp0(FCmpInst::FCMP_ULE);
8537 case FCmpInst::FCMP_ONE:
8538 // sqrt(X) != 0.0 --> X > 0.0
8539 return ReplacePredAndOp0(FCmpInst::FCMP_OGT);
8540 case FCmpInst::FCMP_ORD:
8541 // !isnan(sqrt(X)) --> X >= 0.0
8542 return ReplacePredAndOp0(FCmpInst::FCMP_OGE);
8543 case FCmpInst::FCMP_UNO:
8544 // isnan(sqrt(X)) --> X u< 0.0
8545 return ReplacePredAndOp0(FCmpInst::FCMP_ULT);
8546 default:
8547 llvm_unreachable("Unexpected predicate!");
8548 }
8549}
8550
8551 static Instruction *foldFCmpFNegCommonOp(FCmpInst &I) {
8552 CmpInst::Predicate Pred = I.getPredicate();
8553 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8554
8555 // Canonicalize fneg as Op1.
8556 if (match(Op0, m_FNeg(m_Value())) && !match(Op1, m_FNeg(m_Value()))) {
8557 std::swap(Op0, Op1);
8558 Pred = I.getSwappedPredicate();
8559 }
8560
8561 if (!match(Op1, m_FNeg(m_Specific(Op0))))
8562 return nullptr;
8563
8564 // Replace the negated operand with 0.0:
8565 // fcmp Pred Op0, -Op0 --> fcmp Pred Op0, 0.0
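// For example, "fcmp olt X, -X" is equivalent to "fcmp olt X, 0.0": both
// sides have equal magnitude, so only the sign of X (or X being NaN)
// decides the comparison.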
8566 Constant *Zero = ConstantFP::getZero(Op0->getType());
8567 return new FCmpInst(Pred, Op0, Zero, "", &I);
8568}
8569
8570 static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI,
8571 Constant *RHSC, InstCombinerImpl &CI) {
8572 const CmpInst::Predicate Pred = I.getPredicate();
8573 Value *X = LHSI->getOperand(0);
8574 Value *Y = LHSI->getOperand(1);
8575 switch (Pred) {
8576 default:
8577 break;
8578 case FCmpInst::FCMP_UGT:
8579 case FCmpInst::FCMP_ULT:
8580 case FCmpInst::FCMP_UNE:
8581 case FCmpInst::FCMP_OEQ:
8582 case FCmpInst::FCMP_OGE:
8583 case FCmpInst::FCMP_OLE:
8584 // The optimization is not valid if X and Y are infinities of the same
8585 // sign, i.e. the inf - inf = nan case. If the fsub has the ninf or nnan
8586 // flag then we can assume we do not have that case. Otherwise we might be
8587 // able to prove that either X or Y is not infinity.
8588 if (!LHSI->hasNoNaNs() && !LHSI->hasNoInfs() &&
8589 !isKnownNeverInfinity(Y /* LHSI->getOperand(1) */,
8590 CI.getSimplifyQuery().getWithInstruction(&I)) &&
8591 !isKnownNeverInfinity(X, CI.getSimplifyQuery().getWithInstruction(&I)))
8592 break;
8593
8594 [[fallthrough]];
8595 case FCmpInst::FCMP_OGT:
8596 case FCmpInst::FCMP_OLT:
8597 case FCmpInst::FCMP_ONE:
8598 case FCmpInst::FCMP_UEQ:
8599 case FCmpInst::FCMP_UGE:
8600 case FCmpInst::FCMP_ULE:
8601 // fcmp pred (x - y), 0 --> fcmp pred x, y
8602 if (match(RHSC, m_AnyZeroFP()) &&
8603 I.getFunction()->getDenormalMode(
8604 LHSI->getType()->getScalarType()->getFltSemantics()) ==
8605 DenormalMode::getIEEE()) {
8606 CI.replaceOperand(I, 0, X);
8607 CI.replaceOperand(I, 1, Y);
8608 I.setHasNoInfs(LHSI->hasNoInfs());
8609 if (LHSI->hasNoNaNs())
8610 I.setHasNoNaNs(true);
8611 return &I;
8612 }
8613 break;
8614 }
8615
8616 return nullptr;
8617}
8618
8619 static Instruction *foldFCmpWithFloorAndCeil(FCmpInst &I,
8620 InstCombinerImpl &IC) {
8621 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
8622 Type *OpType = LHS->getType();
8623 CmpInst::Predicate Pred = I.getPredicate();
8624
8625 bool FloorX = match(LHS, m_Intrinsic<Intrinsic::floor>(m_Specific(RHS)));
8626 bool CeilX = match(LHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(RHS)));
8627
8628 if (!FloorX && !CeilX) {
8629 if ((FloorX = match(RHS, m_Intrinsic<Intrinsic::floor>(m_Specific(LHS)))) ||
8630 (CeilX = match(RHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(LHS))))) {
8631 std::swap(LHS, RHS);
8632 Pred = I.getSwappedPredicate();
8633 }
8634 }
8635
8636 switch (Pred) {
8637 case FCmpInst::FCMP_OLE:
8638 // fcmp ole floor(x), x => fcmp ord x, 0
8639 if (FloorX)
8640 return new FCmpInst(FCmpInst::FCMP_ORD, RHS, ConstantFP::getZero(OpType),
8641 "", &I);
8642 break;
8643 case FCmpInst::FCMP_OGT:
8644 // fcmp ogt floor(x), x => false
8645 if (FloorX)
8646 return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8647 break;
8648 case FCmpInst::FCMP_OGE:
8649 // fcmp oge ceil(x), x => fcmp ord x, 0
8650 if (CeilX)
8651 return new FCmpInst(FCmpInst::FCMP_ORD, RHS, ConstantFP::getZero(OpType),
8652 "", &I);
8653 break;
8654 case FCmpInst::FCMP_OLT:
8655 // fcmp olt ceil(x), x => false
8656 if (CeilX)
8657 return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8658 break;
8659 case FCmpInst::FCMP_ULE:
8660 // fcmp ule floor(x), x => true
8661 if (FloorX)
8662 return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8663 break;
8664 case FCmpInst::FCMP_UGT:
8665 // fcmp ugt floor(x), x => fcmp uno x, 0
8666 if (FloorX)
8667 return new FCmpInst(FCmpInst::FCMP_UNO, RHS, ConstantFP::getZero(OpType),
8668 "", &I);
8669 break;
8670 case FCmpInst::FCMP_UGE:
8671 // fcmp uge ceil(x), x => true
8672 if (CeilX)
8673 return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8674 break;
8675 case FCmpInst::FCMP_ULT:
8676 // fcmp ult ceil(x), x => fcmp uno x, 0
8677 if (CeilX)
8678 return new FCmpInst(FCmpInst::FCMP_UNO, RHS, ConstantFP::getZero(OpType),
8679 "", &I);
8680 break;
8681 default:
8682 break;
8683 }
8684
8685 return nullptr;
8686}
8687
8688/// Returns true if a select that implements a min/max is redundant and
8689/// select result can be replaced with its non-constant operand, e.g.,
8690/// select ( (si/ui-to-fp A) <= C ), C, (si/ui-to-fp A)
8691/// where C is the FP constant equal to the minimum integer value
8692/// representable by A.
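/// For example, %f = uitofp i8 %i to float is always >= 0.0, so a clamp such
/// as select (fcmp olt %f, 0.0), float 0.0, float %f (an fmax with 0.0)
/// always yields %f, and the select is redundant.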
8693 static bool isMinMaxCmpSelectEliminable(SelectPatternFlavor Flavor, Value *A,
8694 Value *B) {
8695 const APFloat *APF;
8696 if (!match(B, m_APFloat(APF)))
8697 return false;
8698
8699 auto *I = dyn_cast<Instruction>(A);
8700 if (!I || !(I->getOpcode() == Instruction::SIToFP ||
8701 I->getOpcode() == Instruction::UIToFP))
8702 return false;
8703
8704 bool IsUnsigned = I->getOpcode() == Instruction::UIToFP;
8705 unsigned BitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
8706 APSInt IntBoundary = (Flavor == SPF_FMAXNUM)
8707 ? APSInt::getMinValue(BitWidth, IsUnsigned)
8708 : APSInt::getMaxValue(BitWidth, IsUnsigned);
8709 APSInt ConvertedInt(BitWidth, IsUnsigned);
8710 bool IsExact;
8711 APFloat::opStatus Status =
8712 APF->convertToInteger(ConvertedInt, APFloat::rmTowardZero, &IsExact);
8713 return Status == APFloat::opOK && IsExact && ConvertedInt == IntBoundary;
8714}
8715
8716 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
8717 bool Changed = false;
8718
8719 /// Orders the operands of the compare so that they are listed from most
8720 /// complex to least complex. This puts binary operators before unary
8721 /// operators, which in turn come before constants.
8722 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
8723 I.swapOperands();
8724 Changed = true;
8725 }
8726
8727 const CmpInst::Predicate Pred = I.getPredicate();
8728 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8729 if (Value *V = simplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
8730 SQ.getWithInstruction(&I)))
8731 return replaceInstUsesWith(I, V);
8732
8733 // Simplify 'fcmp pred X, X'
8734 Type *OpType = Op0->getType();
8735 assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
8736 if (Op0 == Op1) {
8737 switch (Pred) {
8738 default:
8739 break;
8740 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
8741 case FCmpInst::FCMP_ULT: // True if unordered or less than
8742 case FCmpInst::FCMP_UGT: // True if unordered or greater than
8743 case FCmpInst::FCMP_UNE: // True if unordered or not equal
8744 // Canonicalize these to be 'fcmp uno %X, 0.0'.
8745 I.setPredicate(FCmpInst::FCMP_UNO);
8746 I.setOperand(1, Constant::getNullValue(OpType));
8747 return &I;
8748
8749 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
8750 case FCmpInst::FCMP_OEQ: // True if ordered and equal
8751 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
8752 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
8753 // Canonicalize these to be 'fcmp ord %X, 0.0'.
8754 I.setPredicate(FCmpInst::FCMP_ORD);
8755 I.setOperand(1, Constant::getNullValue(OpType));
8756 return &I;
8757 }
8758 }
8759
8760 if (I.isCommutative()) {
8761 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
8762 replaceOperand(I, 0, Pair->first);
8763 replaceOperand(I, 1, Pair->second);
8764 return &I;
8765 }
8766 }
8767
8768 // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
8769 // then canonicalize the operand to 0.0.
8770 if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
8771 if (!match(Op0, m_PosZeroFP()) &&
8772 isKnownNeverNaN(Op0, getSimplifyQuery().getWithInstruction(&I)))
8773 return replaceOperand(I, 0, ConstantFP::getZero(OpType));
8774
8775 if (!match(Op1, m_PosZeroFP()) &&
8776 isKnownNeverNaN(Op1, getSimplifyQuery().getWithInstruction(&I)))
8777 return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8778 }
8779
8780 // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
8781 Value *X, *Y;
8782 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
8783 return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
8784
8785 if (Instruction *R = foldFCmpFNegCommonOp(I))
8786 return R;
8787
8788 // Test if the FCmpInst instruction is used exclusively by a select as
8789 // part of a minimum or maximum operation. If so, refrain from doing
8790 // any other folding. This helps out other analyses which understand
8791 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
8792 // and CodeGen. And in this case, at least one of the comparison
8793 // operands has at least one user besides the compare (the select),
8794 // which would often largely negate the benefit of folding anyway.
8795 if (I.hasOneUse())
8796 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
8797 Value *A, *B;
8798 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
8799 bool IsRedundantMinMaxClamp =
8800 (SPR.Flavor == SPF_FMAXNUM || SPR.Flavor == SPF_FMINNUM) &&
8801 isMinMaxCmpSelectEliminable(SPR.Flavor, A, B);
8802 if (SPR.Flavor != SPF_UNKNOWN && !IsRedundantMinMaxClamp)
8803 return nullptr;
8804 }
8805
8806 // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
8807 // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
8808 if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
8809 return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8810
8811 // Canonicalize:
8812 // fcmp olt X, +inf -> fcmp one X, +inf
8813 // fcmp ole X, +inf -> fcmp ord X, 0
8814 // fcmp ogt X, +inf -> false
8815 // fcmp oge X, +inf -> fcmp oeq X, +inf
8816 // fcmp ult X, +inf -> fcmp une X, +inf
8817 // fcmp ule X, +inf -> true
8818 // fcmp ugt X, +inf -> fcmp uno X, 0
8819 // fcmp uge X, +inf -> fcmp ueq X, +inf
8820 // fcmp olt X, -inf -> false
8821 // fcmp ole X, -inf -> fcmp oeq X, -inf
8822 // fcmp ogt X, -inf -> fcmp one X, -inf
8823 // fcmp oge X, -inf -> fcmp ord X, 0
8824 // fcmp ult X, -inf -> fcmp uno X, 0
8825 // fcmp ule X, -inf -> fcmp ueq X, -inf
8826 // fcmp ugt X, -inf -> fcmp une X, -inf
8827 // fcmp uge X, -inf -> true
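// For example, "fcmp ult X, +inf" is true iff X is NaN or X < +inf, i.e.
// iff X != +inf in the unordered sense, hence "fcmp une X, +inf".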
8828 const APFloat *C;
8829 if (match(Op1, m_APFloat(C)) && C->isInfinity()) {
8830 switch (C->isNegative() ? FCmpInst::getSwappedPredicate(Pred) : Pred) {
8831 default:
8832 break;
8833 case FCmpInst::FCMP_ORD:
8834 case FCmpInst::FCMP_UNO:
8835 case FCmpInst::FCMP_TRUE:
8836 case FCmpInst::FCMP_FALSE:
8837 case FCmpInst::FCMP_OGT:
8838 case FCmpInst::FCMP_ULE:
8839 llvm_unreachable("Should be simplified by InstSimplify");
8840 case FCmpInst::FCMP_OLT:
8841 return new FCmpInst(FCmpInst::FCMP_ONE, Op0, Op1, "", &I);
8842 case FCmpInst::FCMP_OLE:
8843 return new FCmpInst(FCmpInst::FCMP_ORD, Op0, ConstantFP::getZero(OpType),
8844 "", &I);
8845 case FCmpInst::FCMP_OGE:
8846 return new FCmpInst(FCmpInst::FCMP_OEQ, Op0, Op1, "", &I);
8847 case FCmpInst::FCMP_ULT:
8848 return new FCmpInst(FCmpInst::FCMP_UNE, Op0, Op1, "", &I);
8849 case FCmpInst::FCMP_UGT:
8850 return new FCmpInst(FCmpInst::FCMP_UNO, Op0, ConstantFP::getZero(OpType),
8851 "", &I);
8852 case FCmpInst::FCMP_UGE:
8853 return new FCmpInst(FCmpInst::FCMP_UEQ, Op0, Op1, "", &I);
8854 }
8855 }
8856
8857 // Ignore signbit of bitcasted int when comparing equality to FP 0.0:
8858 // fcmp oeq/une (bitcast X), 0.0 --> (and X, SignMaskC) ==/!= 0
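// Worked example (hypothetical IR) at i32/float: for
//   %f = bitcast i32 %x to float
// "fcmp oeq float %f, 0.0" is true exactly for the bit patterns 0x00000000
// and 0x80000000, i.e. exactly when (and i32 %x, 0x7FFFFFFF) == 0.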
8859 if (match(Op1, m_PosZeroFP()) &&
8860 match(Op0, m_ElementWiseBitCast(m_Value(X)))) {
8861 ICmpInst::Predicate IntPred = ICmpInst::BAD_ICMP_PREDICATE;
8862 if (Pred == FCmpInst::FCMP_OEQ)
8863 IntPred = ICmpInst::ICMP_EQ;
8864 else if (Pred == FCmpInst::FCMP_UNE)
8865 IntPred = ICmpInst::ICMP_NE;
8866
8867 if (IntPred != ICmpInst::BAD_ICMP_PREDICATE) {
8868 Type *IntTy = X->getType();
8869 const APInt &SignMask = ~APInt::getSignMask(IntTy->getScalarSizeInBits());
8870 Value *MaskX = Builder.CreateAnd(X, ConstantInt::get(IntTy, SignMask));
8871 return new ICmpInst(IntPred, MaskX, ConstantInt::getNullValue(IntTy));
8872 }
8873 }
8874
8875 // Handle fcmp with instruction LHS and constant RHS.
8876 Instruction *LHSI;
8877 Constant *RHSC;
8878 if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
8879 switch (LHSI->getOpcode()) {
8880 case Instruction::Select:
8881 // fcmp eq (cond ? x : -x), 0 --> fcmp eq x, 0
8882 if (FCmpInst::isEquality(Pred) && match(RHSC, m_AnyZeroFP()) &&
8883 match(LHSI, m_Select(m_Value(), m_Value(X), m_FNeg(m_Deferred(X)))))
8884 return replaceOperand(I, 0, X);
8885 if (Instruction *NV = FoldOpIntoSelect(I, cast<SelectInst>(LHSI)))
8886 return NV;
8887 break;
8888 case Instruction::FSub:
8889 if (LHSI->hasOneUse())
8890 if (Instruction *NV = foldFCmpFSubIntoFCmp(I, LHSI, RHSC, *this))
8891 return NV;
8892 break;
8893 case Instruction::PHI:
8894 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
8895 return NV;
8896 break;
8897 case Instruction::SIToFP:
8898 case Instruction::UIToFP:
8899 if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
8900 return NV;
8901 break;
8902 case Instruction::FDiv:
8903 if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
8904 return NV;
8905 break;
8906 case Instruction::Load:
8907 if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
8908 if (Instruction *Res =
8909 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, I))
8910 return Res;
8911 break;
8912 case Instruction::FPTrunc:
8913 if (Instruction *NV = foldFCmpFpTrunc(I, *LHSI, *RHSC))
8914 return NV;
8915 break;
8916 }
8917 }
8918
8919 if (Instruction *R = foldFabsWithFcmpZero(I, *this))
8920 return R;
8921
8922 if (Instruction *R = foldSqrtWithFcmpZero(I, *this))
8923 return R;
8924
8925 if (Instruction *R = foldFCmpWithFloorAndCeil(I, *this))
8926 return R;
8927
8928 if (match(Op0, m_FNeg(m_Value(X)))) {
8929 // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
8930 Constant *C;
8931 if (match(Op1, m_Constant(C)))
8932 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
8933 return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
8934 }
8935
8936 // fcmp (fadd X, 0.0), Y --> fcmp X, Y
8937 if (match(Op0, m_FAdd(m_Value(X), m_AnyZeroFP())))
8938 return new FCmpInst(Pred, X, Op1, "", &I);
8939
8940 // fcmp X, (fadd Y, 0.0) --> fcmp X, Y
8941 if (match(Op1, m_FAdd(m_Value(Y), m_AnyZeroFP())))
8942 return new FCmpInst(Pred, Op0, Y, "", &I);
8943
8944 if (match(Op0, m_FPExt(m_Value(X)))) {
8945 // fcmp (fpext X), (fpext Y) -> fcmp X, Y
8946 if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
8947 return new FCmpInst(Pred, X, Y, "", &I);
8948
8949 const APFloat *C;
8950 if (match(Op1, m_APFloat(C))) {
8951 const fltSemantics &FPSem =
8952 X->getType()->getScalarType()->getFltSemantics();
8953 bool Lossy;
8954 APFloat TruncC = *C;
8955 TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
8956
8957 if (Lossy) {
8958 // X can't possibly equal the higher-precision constant, so reduce any
8959 // equality comparison.
8960 // TODO: Other predicates can be handled via getFCmpCode().
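// For example, no float X satisfies fpext(X) == 0.1 (as double): fpext is
// exact and 0.1 is not exactly representable in float, so an oeq compare
// folds to false.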
8961 switch (Pred) {
8962 case FCmpInst::FCMP_OEQ:
8963 // X is ordered and equal to an impossible constant --> false
8964 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8965 case FCmpInst::FCMP_ONE:
8966 // X is ordered and not equal to an impossible constant --> ordered
8967 return new FCmpInst(FCmpInst::FCMP_ORD, X,
8968 ConstantFP::getZero(X->getType()));
8969 case FCmpInst::FCMP_UEQ:
8970 // X is unordered or equal to an impossible constant --> unordered
8971 return new FCmpInst(FCmpInst::FCMP_UNO, X,
8972 ConstantFP::getZero(X->getType()));
8973 case FCmpInst::FCMP_UNE:
8974 // X is unordered or not equal to an impossible constant --> true
8975 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8976 default:
8977 break;
8978 }
8979 }
8980
8981 // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
8982 // Avoid lossy conversions and denormals.
8983 // Zero is a special case that's OK to convert.
8984 APFloat Fabs = TruncC;
8985 Fabs.clearSign();
8986 if (!Lossy &&
8987 (Fabs.isZero() || !(Fabs < APFloat::getSmallestNormalized(FPSem)))) {
8988 Constant *NewC = ConstantFP::get(X->getType(), TruncC);
8989 return new FCmpInst(Pred, X, NewC, "", &I);
8990 }
8991 }
8992 }
8993
8994 // Convert a sign-bit test of an FP value into a cast and integer compare.
8995 // TODO: Simplify if the copysign constant is 0.0 or NaN.
8996 // TODO: Handle non-zero compare constants.
8997 // TODO: Handle other predicates.
8998 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C),
8999 m_Value(X)))) &&
9000 match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) {
9001 Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());
9002 if (auto *VecTy = dyn_cast<VectorType>(OpType))
9003 IntType = VectorType::get(IntType, VecTy->getElementCount());
9004
9005 // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0
9006 if (Pred == FCmpInst::FCMP_OLT) {
9007 Value *IntX = Builder.CreateBitCast(X, IntType);
9008 return new ICmpInst(ICmpInst::ICMP_SLT, IntX,
9009 ConstantInt::getNullValue(IntType));
9010 }
9011 }
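// For example, copysign(1.0, X) < 0.0 holds exactly when the sign bit of X
// is set, which is what the integer compare above tests.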
9012
9013 {
9014 Value *CanonLHS = nullptr;
9015 match(Op0, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonLHS)));
9016 // (canonicalize(x) == x) => (x == x)
9017 if (CanonLHS == Op1)
9018 return new FCmpInst(Pred, Op1, Op1, "", &I);
9019
9020 Value *CanonRHS = nullptr;
9021 match(Op1, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonRHS)));
9022 // (x == canonicalize(x)) => (x == x)
9023 if (CanonRHS == Op0)
9024 return new FCmpInst(Pred, Op0, Op0, "", &I);
9025
9026 // (canonicalize(x) == canonicalize(y)) => (x == y)
9027 if (CanonLHS && CanonRHS)
9028 return new FCmpInst(Pred, CanonLHS, CanonRHS, "", &I);
9029 }
9030
9031 if (I.getType()->isVectorTy())
9032 if (Instruction *Res = foldVectorCmp(I, Builder))
9033 return Res;
9034
9035 return Changed ? &I : nullptr;
9036}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define Check(C,...)
Hexagon Common GEP
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static bool isMinMaxCmpSelectEliminable(SelectPatternFlavor Flavor, Value *A, Value *B)
Returns true if a select that implements a min/max is redundant and select result can be replaced wit...
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T1
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static constexpr roundingMode rmTowardZero
Definition APFloat.h:348
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
opStatus
IEEE-754R 7: Default exception handling.
Definition APFloat.h:360
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:6053
void clearSign()
Definition APFloat.h:1280
bool isNaN() const
Definition APFloat.h:1429
bool isZero() const
Definition APFloat.h:1427
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
Definition APFloat.h:1140
APInt bitcastToAPInt() const
Definition APFloat.h:1335
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition APFloat.h:1120
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
Definition APFloat.h:1314
opStatus next(bool nextDown)
Definition APFloat.h:1236
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1080
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
Definition APFloat.cpp:5982
opStatus roundToIntegral(roundingMode RM)
Definition APFloat.h:1230
bool isInfinity() const
Definition APFloat.h:1428
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1573
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition APInt.h:450
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1012
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1513
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
APInt abs() const
Get the absolute value.
Definition APInt.h:1796
unsigned ceilLogBase2() const
Definition APInt.h:1765
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1948
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1183
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
Definition APInt.h:467
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1112
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
Definition APInt.h:217
bool isNegative() const
Determine sign of this APInt.
Definition APInt.h:330
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
bool eq(const APInt &RHS) const
Equality comparison.
Definition APInt.h:1080
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1644
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1935
void negate()
Negate this APInt in place.
Definition APInt.h:1469
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
void flipAllBits()
Toggle every bit to its opposite value.
Definition APInt.h:1453
unsigned countl_one() const
Count the number of leading one bits.
Definition APInt.h:1616
unsigned logBase2() const
Definition APInt.h:1762
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:828
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:874
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1238
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:852
unsigned countr_one() const
Count the number of trailing one bits.
Definition APInt.h:1657
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1222
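The APInt members listed above carry most of the integer reasoning in this file. A minimal standalone sketch of how a few of them compose (the function name is illustrative, not part of LLVM):

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using namespace llvm;

  void apintSketch() {
    // Overflow-aware add: i8 127 + 1 wraps to the minimum signed value.
    bool Overflow;
    APInt Sum = APInt(8, 127).sadd_ov(APInt(8, 1), Overflow);
    assert(Overflow && Sum.isMinSignedValue());

    // Mask constructors: low four bits, high four bits, and the sign bit.
    APInt Lo = APInt::getLowBitsSet(8, 4);  // 0x0F
    APInt Hi = APInt::getHighBitsSet(8, 4); // 0xF0
    assert((Lo | Hi).isAllOnes());
    assert(APInt::getSignMask(8).isSignMask());

    // Power-of-two queries pair with logBase2 to recover exact shift amounts.
    APInt P = APInt::getOneBitSet(8, 5);
    assert(P.isPowerOf2() && P.logBase2() == 5);
  }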
An arbitrary precision integer that knows its signedness.
Definition APSInt.h:24
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
Definition APSInt.h:312
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
Definition APSInt.h:304
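A short sketch of the APSInt bound helpers above; the same bit width yields different extremes depending on the signedness flag (function name illustrative):

  #include "llvm/ADT/APSInt.h"
  #include <cassert>
  using namespace llvm;

  void apsintSketch() {
    // For 8 bits: the signed maximum is 127, the unsigned maximum is 255.
    APSInt SMax = APSInt::getMaxValue(8, /*Unsigned=*/false);
    APSInt UMax = APSInt::getMaxValue(8, /*Unsigned=*/true);
    assert(SMax.getExtValue() == 127 && UMax.getExtValue() == 255);
  }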
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
static LLVM_ABI Predicate getFlippedStrictnessPredicate(Predicate pred)
This is a static version that you can use without an instruction available.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isUnsigned() const
Definition InstrTypes.h:936
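The predicate helpers above are pure predicate-to-predicate maps. A small sketch of the algebra they implement, assuming the static overloads of the listed instance methods (function name illustrative):

  #include "llvm/IR/InstrTypes.h"
  #include <cassert>
  using namespace llvm;

  void predicateSketch() {
    CmpInst::Predicate P = CmpInst::ICMP_SLT;
    // Inversion negates the result; swapping mirrors the operand order.
    assert(CmpInst::getInversePredicate(P) == CmpInst::ICMP_SGE);
    assert(CmpInst::getSwappedPredicate(P) == CmpInst::ICMP_SGT);
    // Strict and non-strict forms round-trip: SLT -> SLE -> SLT.
    CmpInst::Predicate NonStrict = CmpInst::getNonStrictPredicate(P);
    assert(NonStrict == CmpInst::ICMP_SLE);
    assert(CmpInst::getStrictPredicate(NonStrict) == P);
  }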
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the limit value.
Definition Constants.h:269
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:219
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
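A brief sketch of the ConstantInt factories above; note that getSigned sign-extends its 64-bit argument to the requested type's width (function name illustrative):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"
  #include <cassert>
  using namespace llvm;

  void constantIntSketch() {
    LLVMContext Ctx;
    ConstantInt *True = ConstantInt::getTrue(Ctx); // booleans are i1 constants
    ConstantInt *MinusOne = ConstantInt::getSigned(Type::getInt32Ty(Ctx), -1);
    assert(True->getBitWidth() == 1);
    assert(MinusOne->getValue().isAllOnes()); // -1 sign-extends to all ones
  }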
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this range and a value in Other.
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current type.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
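makeExactICmpRegion and getEquivalentICmp are inverses of a sort: one turns a predicate/constant pair into a range, the other recovers such a pair when one exists. A hedged round-trip sketch (function name illustrative):

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  #include "llvm/IR/InstrTypes.h"
  #include <cassert>
  using namespace llvm;

  void rangeSketch() {
    // Every 8-bit X satisfying "X u< 10" lies in the half-open range [0, 10).
    APInt Ten(8, 10);
    ConstantRange CR =
        ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, Ten);
    assert(CR.getLower().isZero() && CR.getUpper() == Ten);

    // getEquivalentICmp recovers a predicate/constant pair describing the set.
    CmpInst::Predicate Pred;
    APInt RHS;
    if (CR.getEquivalentICmp(Pred, RHS))
      assert(Pred == CmpInst::ICMP_ULT && RHS == Ten);
  }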
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers, all equal, and the common value is returned.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string, and methods for querying it.
Definition DataLayout.h:64
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate Pred)
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition Operator.h:430
LLVM_ABI Type * getSourceElementType() const
Definition Operator.cpp:71
Value * getPointerOperand()
Definition Operator.h:457
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
Definition Operator.h:504
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
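ICmpInst::compare evaluates a predicate on constant operands directly. A small sketch showing how signedness changes the ordering of the same bit pattern (function name illustrative):

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  void compareSketch() {
    // The same bits order differently under signed and unsigned predicates:
    // 0xFF is -1 as a signed i8 but 255 as an unsigned one.
    APInt A(8, 0xFF), B(8, 1);
    assert(ICmpInst::compare(A, B, ICmpInst::ICMP_SLT)); // -1 <s 1
    assert(ICmpInst::compare(A, B, ICmpInst::ICMP_UGT)); // 255 >u 1
  }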
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2442
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2794
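A minimal sketch of the IRBuilder calls above, materializing the kind of masked equality test many of this file's folds canonicalize toward (the module/function scaffolding is illustrative only):

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  void builderSketch() {
    LLVMContext Ctx;
    Module M("demo", Ctx);
    FunctionType *FTy = FunctionType::get(Type::getInt1Ty(Ctx),
                                          {Type::getInt32Ty(Ctx)}, false);
    Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
    BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);

    // Build "(X & 7) == 0" and return it.
    IRBuilder<> B(BB);
    Value *X = F->getArg(0);
    Value *Masked = B.CreateAnd(X, B.getInt32(7));
    Value *IsAligned = B.CreateICmp(CmpInst::ICMP_EQ, Masked, B.getInt32(0));
    B.CreateRet(IsAligned);
  }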
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth into a sign test on the original value.
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0, see if we can fold the instruction into the PHI (which is only possible if all operands to the PHI are constants).
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global variable with a constant initializer.
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min)
Match and fold patterns like: icmp eq/ne X, min(max(X, Lo), Hi) which represents a range check and ca...
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible, updating it in place.
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand, try to fold the binary operator into the select arguments.
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y and ((x * y) ?/ x) != y to @llvm.?mul.with.overflow(x, y) plus extraction of the overflow bit.
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users, invert the predicate and adjust all the users.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns b...
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than, equal to, or greater than RHS respectively.
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known t...
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit test.
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq ((num + mask) & ~mask), num to icmp eq (and num, mask), 0, where mask is a low-bit mask.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1); ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1).
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
SimplifyQuery SQ
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms.
const DataLayout & DL
DomConditionCache DC
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
BuilderTy & Builder
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isShift() const
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static bool isMin(Intrinsic::ID ID)
Whether the intrinsic is a smin or umin.
static bool isSigned(Intrinsic::ID ID)
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
bool contains(const_arg_type key) const
Check if the SetVector contains the given key.
Definition SetVector.h:252
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
Definition Type.h:165
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
Definition Type.cpp:235
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
void setOperand(unsigned i, Value *Val)
Definition User.h:238
Value * getOperand(unsigned i) const
Definition User.h:233
unsigned getNumOperands() const
Definition User.h:255
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition Value.cpp:158
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
Definition APInt.cpp:2763
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
Definition APInt.cpp:2781
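A small sketch of the rounding division helpers above (function name illustrative):

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using namespace llvm;

  void roundingDivSketch() {
    // 7 divided by 2 yields 3 or 4 depending on the requested rounding.
    APInt A(32, 7), B(32, 2);
    assert(APIntOps::RoundingUDiv(A, B, APInt::Rounding::DOWN) == 3);
    assert(APIntOps::RoundingUDiv(A, B, APInt::Rounding::UP) == 4);
  }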
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right-shift operations (lshr or ashr).
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set, or zero.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
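These combinators nest to describe whole instruction trees, binding sub-values and constants as they match. A hedged sketch; the helper name is hypothetical, not an LLVM API:

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/PatternMatch.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;
  using namespace PatternMatch;

  // Hypothetical helper: returns true if V is "(X + C) u< C2", binding X and
  // the two constants for the caller.
  static bool matchAddULtConst(Value *V, Value *&X, const APInt *&C,
                               const APInt *&C2) {
    CmpPredicate Pred;
    return match(V,
                 m_ICmp(Pred, m_Add(m_Value(X), m_APInt(C)), m_APInt(C2))) &&
           Pred == ICmpInst::ICMP_ULT;
  }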
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector value has no infinities.
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
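A short sketch of isSignBitCheck on two equivalent i8 sign tests; the include path is an assumption, since only the declaration is shown above:

  #include "llvm/ADT/APInt.h"
  #include "llvm/Analysis/ValueTracking.h" // assumed location of the declaration
  #include "llvm/IR/Instructions.h"
  #include <cassert>
  using namespace llvm;

  void signBitSketch() {
    bool TrueIfSigned = false;
    // "icmp slt X, 0" inspects only the sign bit of X.
    assert(isSignBitCheck(ICmpInst::ICMP_SLT, APInt(8, 0), TrueIfSigned) &&
           TrueIfSigned);
    // "icmp ugt X, 127" is the same i8 test in unsigned form.
    assert(isSignBitCheck(ICmpInst::ICMP_UGT, APInt(8, 127), TrueIfSigned) &&
           TrueIfSigned);
  }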
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
Definition APFloat.h:1516
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition bit.h:236
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UNKNOWN
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)
Decompose a pointer into a linear expression.
Definition Loads.cpp:893
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
Definition APFloat.h:1525
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out parameter results if we successfully match.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
@ Other
Any other memory.
Definition ModRef.h:68
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
constexpr unsigned BitWidth
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2009
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has no NaNs.
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2156
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
@ Continue
Definition DWP.h:22
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value, returning the original object being addressed.
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define NC
Definition regutils.h:42
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult value(Value *V)
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV)
static OffsetResult invalid()
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:80
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:245
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:277
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
Definition KnownBits.h:154
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:292
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
bool isConstant() const
Returns true if we know the value of all bits.
Definition KnownBits.h:54
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:251
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:148
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:132
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
Definition KnownBits.h:114
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:289
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
Definition KnownBits.h:138
const APInt & getConstant() const
Returns the value when all bits have a known value.
Definition KnownBits.h:60
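All the KnownBits queries above read off the same two masks, Zero and One. A small sketch that builds the masks by hand instead of calling computeKnownBits (function name illustrative):

  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/KnownBits.h"
  #include <cassert>
  using namespace llvm;

  void knownBitsSketch() {
    // An i8 value of the form 0b0000_01?0: bit 2 is one, bit 1 is unknown,
    // every other bit is zero. The only possible values are 4 and 6.
    KnownBits Known(8);
    Known.One = APInt(8, 0b100);
    Known.Zero = ~APInt(8, 0b110);
    assert(Known.getMinValue() == 4 && Known.getMaxValue() == 6);
    assert(Known.countMinTrailingZeros() == 1); // bit 0 is known zero
    assert(Known.isNonNegative());              // sign bit is known zero
  }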
Linear expression BasePtr + Index * Scale + Offset.
Definition Loads.h:203
GEPNoWrapFlags Flags
Definition Loads.h:208
Matching combinators.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
const DataLayout & DL
const Instruction * CxtI
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
AssumptionCache * AC
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Capture information for a specific Use.