//===- InstCombineCompares.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <bitset>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
                            bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);

  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
                            bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);

  return Overflow;
}
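
// Example of how the helpers above behave, with 8-bit APInts:
//   addWithOverflow(R, APInt(8, 100), APInt(8, 100), /*IsSigned=*/true)
// wraps to R = -56 and returns true, while the unsigned form yields
// R = 200 and returns false.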

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch; used to detect sign-bit comparisons that feed branches.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isZero())
    return ICmpInst::isRelational(Pred);

  if (C.isOne()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnes()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}
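
// Example: isSignTest rewrites (X <s 1) to the sign test (X <=s 0) and
// (X >s -1) to (X >=s 0); (X <s 0) and (X >s 0) are already sign tests.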

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
    LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst) {
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(GEP));
  if (LI->isVolatile() || !GV || !GV->isConstant() ||
      !GV->hasDefinitiveInitializer())
    return nullptr;

  Type *EltTy = LI->getType();
  TypeSize EltSize = DL.getTypeStoreSize(EltTy);
  if (EltSize.isScalable())
    return nullptr;

  auto Expr = decomposeLinearExpression(DL, GEP);
  if (!Expr.Index || Expr.BasePtr != GV || Expr.Offset.getBitWidth() > 64)
    return nullptr;

  Constant *Init = GV->getInitializer();
  TypeSize GlobalSize = DL.getTypeAllocSize(Init->getType());

  Value *Idx = Expr.Index;
  const APInt &Stride = Expr.Scale;
  const APInt &ConstOffset = Expr.Offset;

  // Allow an additional context offset, but only within the stride.
  if (!ConstOffset.ult(Stride))
    return nullptr;

  // Don't handle overlapping loads for now.
  if (!Stride.uge(EltSize.getFixedValue()))
    return nullptr;

  // Don't blow up on huge arrays.
  uint64_t ArrayElementCount =
      divideCeil((GlobalSize.getFixedValue() - ConstOffset.getZExtValue()),
                 Stride.getZExtValue());
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  APInt Offset = ConstOffset;
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i, Offset += Stride) {
    Constant *Elt = ConstantFoldLoadFromConst(Init, EltTy, Offset, DL);
    if (!Elt)
      return nullptr;

    // If the element is masked, handle it.
    if (AndCst) {
      Elt = ConstantFoldBinaryOpOperands(Instruction::And, Elt, AndCst, DL);
      if (!Elt)
        return nullptr;
    }

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    if (!C)
      return nullptr;

    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i - 1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i - 1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C))
      return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i - 1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i - 1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.

  // If the inbounds keyword is not present, Idx * Stride can overflow.
  // Let's assume that Stride is 2 and the wanted value is at offset 0.
  // Then, there are two possible values for Idx to match offset 0:
  // 0x00..00, 0x80..00.
  // Emitting 'icmp eq Idx, 0' isn't correct in this case because the
  // comparison is false if Idx was 0x80..00.
  // We need to erase the highest countTrailingZeros(Stride) bits of Idx.
  auto MaskIdx = [&](Value *Idx) {
    if (!Expr.Flags.isInBounds() && Stride.countr_zero() != 0) {
      Value *Mask = Constant::getAllOnesValue(Idx->getType());
      Mask = Builder.CreateLShr(Mask, Stride.countr_zero());
      Idx = Builder.CreateAnd(Idx, Mask);
    }
    return Idx;
  };

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    Idx = MaskIdx(Idx);
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    Idx = MaskIdx(Idx);
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx =
        ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
    Idx = MaskIdx(Idx);

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End =
        ConstantInt::get(Idx->getType(), TrueRangeEnd - FirstTrueElement + 1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    Idx = MaskIdx(Idx);
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End =
        ConstantInt::get(Idx->getType(), FalseRangeEnd - FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Idx = MaskIdx(Idx);
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}
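
// Example: for @str = constant [4 x i8] c"abca", the comparison
//   icmp eq (load i8 (gep @str, 0, %i)), 97      ; 'a' is at indices 0 and 3
// is true for %i in {0, 3}, so the two-element case above emits:
//   %c1 = icmp eq i64 %i, 0
//   %c2 = icmp eq i64 %i, 3
//   %r  = or i1 %c1, %c2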

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SmallSetVector<PHINode *, 8> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.contains(V)) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<GEPOperator>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // Only allow inbounds GEPs with at most one variable offset.
        auto IsNonConst = [](Value *V) { return !isa<ConstantInt>(V); };
        if (!GEP->isInBounds() || count_if(GEP->indices(), IsNonConst) > 1)
          return false;

        NW = NW.intersectForOffsetAdd(GEP->getNoWrapFlags());
        if (!Explored.contains(GEP->getOperand(0)))
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (!Explored.contains(Op))
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          !Explored.contains(PHI))
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    BasicBlock *Parent = PHI->getParent();
    Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored,
                                 InstCombiner &IC) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] =
          PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                          PHI->getName() + ".idx", PHI->getIterator());
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {
    if (NewInsts.contains(Val))
      continue;

    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      setInsertionPoint(Builder, GEP);
      Value *Op = NewInsts[GEP->getOperand(0)];
      Value *OffsetV = emitGEPOffset(&Builder, DL, GEP);
      if (isa<Constant>(Op) && cast<Constant>(Op)->isNullValue())
        NewInsts[GEP] = OffsetV;
      else
        NewInsts[GEP] = Builder.CreateAdd(
            Op, OffsetV, GEP->getOperand(0)->getName() + ".add",
            /*NUW=*/NW.hasNoUnsignedWrap(),
            /*NSW=*/NW.hasNoUnsignedSignedWrap());
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        auto It = NewInsts.find(NewIncoming);
        if (It != NewInsts.end())
          NewIncoming = It->second;

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    setInsertionPoint(Builder, Val, false);
    // Create GEP for external users.
    Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(), Base, NewInsts[Val],
                                      Val->getName() + ".ptr", NW);
    IC.replaceInstUsesWith(*cast<Instruction>(Val), NewVal);
    // Add old instruction to worklist for DCE. We don't directly remove it
    // here because the original compare is one of the users.
    IC.addToWorklist(cast<Instruction>(Val));
  }

  return NewInsts[Start];
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              CmpPredicate Cond,
                                              const DataLayout &DL,
                                              InstCombiner &IC) {
  // FIXME: Support vector of pointers.
  if (GEPLHS->getType()->isVectorTy())
    return nullptr;

  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  APInt Offset(DL.getIndexTypeSizeInBits(GEPLHS->getType()), 0);
  Value *PtrBase =
      GEPLHS->stripAndAccumulateConstantOffsets(DL, Offset,
                                                /*AllowNonInbounds*/ false);

  // Bail if we looked through addrspacecast.
  if (PtrBase->getType() != GEPLHS->getType())
    return nullptr;

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;
  GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags();
  if (!canRewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes, IC);

  // rewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
                      IC.Builder.getInt(Offset), NewRHS);
}
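
// Example: if %p == %base + 4 (a constant-index inbounds GEP) and %q is
// reachable from %base through PHIs/GEPs, the rewrite above turns
//   icmp eq ptr %p, %q
// into a comparison of integer offsets from %base:
//   icmp eq i64 4, %q.idx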

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                           CmpPredicate Cond, Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  auto CanFold = [Cond](GEPNoWrapFlags NW) {
    if (ICmpInst::isEquality(Cond))
      return true;

    // Unsigned predicates can be folded if the GEPs have *any* nowrap flags.
    assert(ICmpInst::isUnsigned(Cond));
    return NW != GEPNoWrapFlags::none();
  };

  auto NewICmp = [Cond](GEPNoWrapFlags NW, Value *Op1, Value *Op2) {
    if (!NW.hasNoUnsignedWrap()) {
      // Convert unsigned to signed comparison.
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Op1, Op2);
    }

    auto *I = new ICmpInst(Cond, Op1, Op2);
    I->setSameSign(NW.hasNoUnsignedSignedWrap());
    return I;
  };

  CommonPointerBase Base = CommonPointerBase::compute(GEPLHS, RHS);
  if (Base.Ptr == RHS && CanFold(Base.LHSNW) && !Base.isExpensive()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    Type *IdxTy = DL.getIndexType(GEPLHS->getType());
    Value *Offset =
        EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEPs=*/true);
    return NewICmp(Base.LHSNW, Offset,
                   Constant::getNullValue(Offset->getType()));
  }

  if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
      isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
      !NullPointerIsDefined(I.getFunction(),
                            RHS->getType()->getPointerAddressSpace())) {
    // For most address spaces, an allocation can't be placed at null, but null
    // itself is treated as a 0 size allocation in the in bounds rules. Thus,
    // the only valid inbounds address derived from null, is null itself.
    // Thus, we have four cases to consider:
    // 1) Base == nullptr, Offset == 0 -> inbounds, null
    // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
    // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
    // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
    //
    // (Note if we're indexing a type of size 0, that simply collapses into one
    // of the buckets above.)
    //
    // In general, we're allowed to make values less poison (i.e. remove
    // sources of full UB), so in this case, we just select between the two
    // non-poison cases (1 and 4 above).
    //
    // For vectors, we apply the same reasoning on a per-lane basis.
    auto *Base = GEPLHS->getPointerOperand();
    if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
      auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
      Base = Builder.CreateVectorSplat(EC, Base);
    }
    return new ICmpInst(Cond, Base,
                        ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                            cast<Constant>(RHS), Base->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags() & GEPRHS->getNoWrapFlags();

    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (GEPLHS->getOperand(0) != GEPRHS->getOperand(0)) {
      bool IndicesTheSame =
          GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
          GEPLHS->getPointerOperand()->getType() ==
              GEPRHS->getPointerOperand()->getType() &&
          GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      Type *BaseType = GEPLHS->getOperand(0)->getType();
      if (IndicesTheSame &&
          CmpInst::makeCmpResultType(BaseType) == I.getType() && CanFold(NW))
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      // FIXME: Support vector of pointers.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          GEPLHS->getOperand(0)->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts() &&
          !GEPLHS->getType()->isVectorTy()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits().getFixedValue() <
              RHSIndexTy->getPrimitiveSizeInBits().getFixedValue()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }
    }

    if (GEPLHS->getOperand(0) == GEPRHS->getOperand(0) &&
        GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
        GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0; // Keep track of # differences.
      unsigned DiffOperand = 0;    // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          Type *LHSType = GEPLHS->getOperand(i)->getType();
          Type *RHSType = GEPRHS->getOperand(i)->getType();
          // FIXME: Better support for vector of pointers.
          if (LHSType->getPrimitiveSizeInBits() !=
                  RHSType->getPrimitiveSizeInBits() ||
              (GEPLHS->getType()->isVectorTy() &&
               (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          }

          if (NumDifferences++)
            break;
          DiffOperand = i;
        }

      if (NumDifferences == 0) // SAME GEP?
        return replaceInstUsesWith(
            I, // No comparison is needed here.
            ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
      // If two GEPs only differ by an index, compare them.
      // Note that nowrap flags are always needed when comparing two indices.
      else if (NumDifferences == 1 && NW != GEPNoWrapFlags::none()) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        return NewICmp(NW, LHSV, RHSV);
      }
    }

    if (Base.Ptr && CanFold(Base.LHSNW & Base.RHSNW) && !Base.isExpensive()) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
      Type *IdxTy = DL.getIndexType(GEPLHS->getType());
      Value *L =
          EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEPs=*/true);
      Value *R =
          EmitGEPOffsets(Base.RHSGEPs, Base.RHSNW, IdxTy, /*RewriteGEPs=*/true);
      return NewICmp(Base.LHSNW & Base.RHSNW, L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts as
  // a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL, *this);
}
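
// Example: two GEPs with the same base and source type that differ in a
// single index,
//   icmp ult (gep nuw i32, ptr %p, i64 %i), (gep nuw i32, ptr %p, i64 %j)
// fold to a direct index comparison: icmp ult i64 %i, %j.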

bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) {
  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // However, we need to ensure that this folding is consistent: We can't fold
  // one comparison to false, and then leave a different comparison against the
  // same value alone (as it might evaluate to true at runtime, leading to a
  // contradiction). As such, this code ensures that all comparisons are folded
  // at the same time, and there are no other escapes.

  struct CmpCaptureTracker : public CaptureTracker {
    AllocaInst *Alloca;
    bool Captured = false;
    /// The value of the map is a bit mask of which icmp operands the alloca is
    /// used in.
    SmallMapVector<ICmpInst *, unsigned, 4> ICmps;

    CmpCaptureTracker(AllocaInst *Alloca) : Alloca(Alloca) {}

    void tooManyUses() override { Captured = true; }

    Action captured(const Use *U, UseCaptureInfo CI) override {
      // TODO(captures): Use UseCaptureInfo.
      auto *ICmp = dyn_cast<ICmpInst>(U->getUser());
      // We need to check that U is based *only* on the alloca, and doesn't
      // have other contributions from a select/phi operand.
      // TODO: We could check whether getUnderlyingObjects() reduces to one
      // object, which would allow looking through phi nodes.
      if (ICmp && ICmp->isEquality() && getUnderlyingObject(*U) == Alloca) {
        // Collect equality icmps of the alloca, and don't treat them as
        // captures.
        ICmps[ICmp] |= 1u << U->getOperandNo();
        return Continue;
      }

      Captured = true;
      return Stop;
    }
  };

  CmpCaptureTracker Tracker(Alloca);
  PointerMayBeCaptured(Alloca, &Tracker);
  if (Tracker.Captured)
    return false;

  bool Changed = false;
  for (auto [ICmp, Operands] : Tracker.ICmps) {
    switch (Operands) {
    case 1:
    case 2: {
      // The alloca is only used in one icmp operand. Assume that the
      // equality is false.
      auto *Res = ConstantInt::get(ICmp->getType(),
                                   ICmp->getPredicate() == ICmpInst::ICMP_NE);
      replaceInstUsesWith(*ICmp, Res);
      eraseInstFromFunction(*ICmp);
      Changed = true;
      break;
    }
    case 3:
      // Both icmp operands are based on the alloca, so this is comparing
      // pointer offsets, without leaking any information about the address
      // of the alloca. Ignore such comparisons.
      break;
    default:
      llvm_unreachable("Cannot happen");
    }
  }

  return Changed;
}
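
// Example: if %a = alloca i32 never escapes, a lone "icmp eq ptr %a, %p"
// against an unrelated pointer folds to false (and "icmp ne" to true),
// because nothing can legitimately guess the alloca's address.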

/// Fold "icmp pred (X+C), X".
Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
                                                  CmpPredicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.
  assert(!!C && "C should not be zero!");

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Constant *R =
        ConstantInt::get(X->getType(), APInt::getMaxValue(C.getBitWidth()) - C);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X,
                        ConstantInt::get(X->getType(), -C));

  APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());

  // (X+ 1) <s X       --> X >s (MAXSINT-1)        --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)        --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)  --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)  --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X,
                        ConstantInt::get(X->getType(), SMax - C));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  return new ICmpInst(ICmpInst::ICMP_SLT, X,
                      ConstantInt::get(X->getType(), SMax - (C - 1)));
}
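
// Example, on i8: "icmp ult (add i8 %x, 1), %x" becomes
// "icmp ugt i8 %x, -2", which holds exactly when %x == 255.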

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                     const APInt &AP1,
                                                     const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isZero())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnes())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countl_one() - AP2.countl_one();
  else
    Shift = AP1.countl_zero() - AP2.countl_zero();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnes() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                     const APInt &AP1,
                                                     const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isZero())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countr_zero();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countr_zero() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombinerImpl &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countr_zero();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  if (IC.ComputeMaxSignificantBits(A, &I) > NewWidth ||
      IC.ComputeMaxSignificantBits(B, &I) > NewWidth)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Function *F = Intrinsic::getOrInsertDeclaration(
      I.getModule(), Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy &Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder.SetInsertPoint(OrigAdd);

  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);
  IC.eraseInstFromFunction(*OrigAdd);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}
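
// Example of the idiom this matches, with i8 values widened to i64:
//   %sum = add i64 %a, %b
//   %off = add i64 %sum, 128
//   %cmp = icmp ugt i64 %off, 255
// When %a and %b have at least 57 sign bits (i.e. fit in i8), this is
// rewritten to
//   %sadd = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a.trunc, i8 %b.trunc)
// and %cmp is replaced by the extracted overflow bit.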

/// If we have:
///   icmp eq/ne (urem/srem %x, %y), 0
/// iff %y is a power-of-two, we can replace this with a bit test:
///   icmp eq/ne (and %x, (add %y, -1)), 0
Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
  // This fold is only valid for equality predicates.
  if (!I.isEquality())
    return nullptr;
  CmpPredicate Pred;
  Value *X, *Y, *Zero;
  if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
                        m_CombineAnd(m_Zero(), m_Value(Zero)))))
    return nullptr;
  if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
    return nullptr;
  // This may increase instruction count, we don't enforce that Y is a constant.
  Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
  Value *Masked = Builder.CreateAnd(X, Mask);
  return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
}
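
// Example: for a power-of-two %y,
//   icmp eq (urem i32 %x, %y), 0
// becomes the bit test
//   %mask   = add i32 %y, -1
//   %masked = and i32 %x, %mask
//   %cmp    = icmp eq i32 %masked, 0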

/// Fold equality-comparison between zero and any (maybe truncated) right-shift
/// by one-less-than-bitwidth into a sign test on the original value.
Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
  Instruction *Val;
  CmpPredicate Pred;
  if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
    return nullptr;

  Value *X;
  Type *XTy;

  Constant *C;
  if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
    XTy = X->getType();
    unsigned XBitWidth = XTy->getScalarSizeInBits();
    if (!match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_EQ,
                                     APInt(XBitWidth, XBitWidth - 1))))
      return nullptr;
  } else if (isa<BinaryOperator>(Val) &&
             (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
                  cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
                  /*AnalyzeForSignBitExtraction=*/true))) {
    XTy = X->getType();
  } else
    return nullptr;

  return ICmpInst::Create(Instruction::ICmp,
                          Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
                                                    : ICmpInst::ICMP_SLT,
                          X, ConstantInt::getNullValue(XTy));
}
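
// Example: "icmp eq (lshr i32 %x, 31), 0" tests the (clear) sign bit and
// becomes "icmp sge i32 %x, 0"; with "ne" it becomes "icmp slt i32 %x, 0".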

// Handle icmp pred X, 0
Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  if (!match(Cmp.getOperand(1), m_Zero()))
    return nullptr;

  // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
  if (Pred == ICmpInst::ICMP_SGT) {
    Value *A, *B;
    if (match(Cmp.getOperand(0), m_SMin(m_Value(A), m_Value(B)))) {
      if (isKnownPositive(A, SQ.getWithInstruction(&Cmp)))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, SQ.getWithInstruction(&Cmp)))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
    return New;

  // Given:
  //   icmp eq/ne (urem %x, %y), 0
  // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
  //   icmp eq/ne %x, 0
  Value *X, *Y;
  if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
      ICmpInst::isEquality(Pred)) {
    KnownBits XKnown = computeKnownBits(X, &Cmp);
    KnownBits YKnown = computeKnownBits(Y, &Cmp);
    if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
      return new ICmpInst(Pred, X, Cmp.getOperand(1));
  }

  // (icmp eq/ne (mul X Y)) -> (icmp eq/ne X/Y) if we know about whether X/Y are
  // odd/non-zero/there is no overflow.
  if (match(Cmp.getOperand(0), m_Mul(m_Value(X), m_Value(Y))) &&
      ICmpInst::isEquality(Pred)) {

    KnownBits XKnown = computeKnownBits(X, &Cmp);
    // if X % 2 != 0
    //   (icmp eq/ne Y)
    if (XKnown.countMaxTrailingZeros() == 0)
      return new ICmpInst(Pred, Y, Cmp.getOperand(1));

    KnownBits YKnown = computeKnownBits(Y, &Cmp);
    // if Y % 2 != 0
    //   (icmp eq/ne X)
    if (YKnown.countMaxTrailingZeros() == 0)
      return new ICmpInst(Pred, X, Cmp.getOperand(1));

    auto *BO0 = cast<OverflowingBinaryOperator>(Cmp.getOperand(0));
    if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
      const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
      // `isKnownNonZero` does more analysis than just `!KnownBits.One.isZero()`
      // but to avoid unnecessary work, first just check if this is an obvious
      // case.

      // if X non-zero and NoOverflow(X * Y)
      //   (icmp eq/ne Y)
      if (!XKnown.One.isZero() || isKnownNonZero(X, Q))
        return new ICmpInst(Pred, Y, Cmp.getOperand(1));

      // if Y non-zero and NoOverflow(X * Y)
      //   (icmp eq/ne X)
      if (!YKnown.One.isZero() || isKnownNonZero(Y, Q))
        return new ICmpInst(Pred, X, Cmp.getOperand(1));
    }
    // Note, we are skipping cases:
    //   if Y % 2 != 0 AND X % 2 != 0
    //     (false/true)
    //   if X non-zero and Y non-zero and NoOverflow(X * Y)
    //     (false/true)
    // Those can be simplified later as we would have already replaced the (icmp
    // eq/ne (mul X, Y)) with (icmp eq/ne X/Y) and if X/Y is known non-zero that
    // will fold to a constant elsewhere.
  }

  // (icmp eq/ne f(X), 0) -> (icmp eq/ne X, 0)
  // where f(X) == 0 if and only if X == 0
  if (ICmpInst::isEquality(Pred))
    if (Value *Stripped = stripNullTest(Cmp.getOperand(0)))
      return new ICmpInst(Pred, Stripped,
                          Constant::getNullValue(Stripped->getType()));

  return nullptr;
}
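
// Example: if %x is known to be odd, then
//   icmp eq (mul i32 %x, %y), 0  -->  icmp eq i32 %y, 0
// since an odd factor is invertible modulo 2^32 and cannot make the
// product zero.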

/// Fold icmp eq (num + mask) & ~mask, num
/// to
///   icmp eq (and num, mask), 0
/// Where mask is a low bit mask.
  Value *Num;
  CmpPredicate Pred;
  const APInt *Mask, *Neg;

  if (!match(&Cmp,
             m_c_ICmp(Pred, m_Value(Num),
                      m_OneUse(m_c_And(m_OneUse(m_NUWAdd(m_Deferred(Num),
                                                         m_LowBitMask(Mask))),
                                       m_APInt(Neg))))))
    return nullptr;

  if (*Neg != ~*Mask)
    return nullptr;

  if (!ICmpInst::isEquality(Pred))
    return nullptr;

  // Create new icmp eq (num & mask), 0
  auto *NewAnd = Builder.CreateAnd(Num, *Mask);
  auto *Zero = Constant::getNullValue(Num->getType());

  return new ICmpInst(Pred, NewAnd, Zero);
}

/// Fold icmp Pred X, C.
/// TODO: This code structure does not make sense. The saturating add fold
/// should be moved to some other helper and extended as noted below (it is also
/// possible that code has been made unnecessary - do we canonicalize IR to
/// overflow/saturating intrinsics or not?).
Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in wider type and explicitly checks for overflow using comparisons against
  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  //   sum = a + b
  //   if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
  Value *A, *B;
  ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
  if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
      match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
    if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
      return Res;

  // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
  Constant *C = dyn_cast<Constant>(Op1);
  if (!C)
    return nullptr;

  if (auto *Phi = dyn_cast<PHINode>(Op0))
    if (all_of(Phi->operands(), IsaPred<Constant>)) {
      SmallVector<Constant *> Ops;
      for (Value *V : Phi->incoming_values()) {
        Constant *Res =
            ConstantFoldCompareInstOperands(Pred, cast<Constant>(V), C, DL);
        if (!Res)
          return nullptr;
        Ops.push_back(Res);
      }
      Builder.SetInsertPoint(Phi);
      PHINode *NewPhi = Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
      for (auto [V, Pred] : zip(Ops, Phi->blocks()))
        NewPhi->addIncoming(V, Pred);
      return replaceInstUsesWith(Cmp, NewPhi);
    }

  if (Instruction *R = tryFoldInstWithCtpopWithNot(&Cmp))
    return R;

  return nullptr;
}

/// Canonicalize icmp instructions based on dominating conditions.
Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
  // We already checked simple implication in InstSimplify, only handle complex
  // cases here.
  Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
  const APInt *C;
  if (!match(Y, m_APInt(C)))
    return nullptr;

  CmpInst::Predicate Pred = Cmp.getPredicate();
  ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *C);

  auto handleDomCond = [&](ICmpInst::Predicate DomPred,
                           const APInt *DomC) -> Instruction * {
    // We have 2 compares of a variable with constants. Calculate the constant
    // ranges of those compares to see if we can transform the 2nd compare:
    // DomBB:
    //   DomCond = icmp DomPred X, DomC
    //   br DomCond, CmpBB, FalseBB
    // CmpBB:
    //   Cmp = icmp Pred X, C
    ConstantRange DominatingCR =
        ConstantRange::makeExactICmpRegion(DomPred, *DomC);
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getTrue());

    // Canonicalizing a sign bit comparison that gets used in a branch,
    // pessimizes codegen by generating branch on zero instruction instead
    // of a test and branch. So we avoid canonicalizing in such situations
    // because test and branch instruction has better branch displacement
    // than compare and branch instruction.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
      return nullptr;

    // Avoid an infinite loop with min/max canonicalization.
    // TODO: This will be unnecessary if we canonicalize to min/max intrinsics.
    if (Cmp.hasOneUse() &&
        match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value())))
      return nullptr;

    if (const APInt *EqC = Intersection.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
    if (const APInt *NeC = Difference.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
    return nullptr;
  };

  for (BranchInst *BI : DC.conditionsFor(X)) {
    CmpPredicate DomPred;
    const APInt *DomC;
    if (!match(BI->getCondition(),
               m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))))
      continue;

    BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
    if (DT.dominates(Edge0, Cmp.getParent())) {
      if (auto *V = handleDomCond(DomPred, DomC))
        return V;
    } else {
      BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
      if (DT.dominates(Edge1, Cmp.getParent()))
        if (auto *V =
                handleDomCond(CmpInst::getInversePredicate(DomPred), DomC))
          return V;
    }
  }

  return nullptr;
}
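
// Example: in a block reached only when (icmp ult i8 %x, 10) is true, the
// dominating range for %x is [0, 10); a later (icmp ult i8 %x, 9) differs
// from it only at %x == 9, so it is rewritten as (icmp ne i8 %x, 9).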

/// Fold icmp (trunc X), C.
Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
                                                     TruncInst *Trunc,
                                                     const APInt &C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  Type *SrcTy = X->getType();
  unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
           SrcBits = SrcTy->getScalarSizeInBits();

  // Match (icmp pred (trunc nuw/nsw X), C)
  // Which we can convert to (icmp pred X, (sext/zext C))
  if (shouldChangeType(Trunc->getType(), SrcTy)) {
    if (Trunc->hasNoSignedWrap())
      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.sext(SrcBits)));
    if (!Cmp.isSigned() && Trunc->hasNoUnsignedWrap())
      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.zext(SrcBits)));
  }

  if (C.isOne() && C.getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  // TODO: Handle non-equality predicates.
  Value *Y;
  const APInt *Pow2;
  if (Cmp.isEquality() && match(X, m_Shl(m_Power2(Pow2), m_Value(Y))) &&
      DstBits > Pow2->logBase2()) {
    // (trunc (Pow2 << Y) to iN) == 0 --> Y u>= N - log2(Pow2)
    // (trunc (Pow2 << Y) to iN) != 0 --> Y u< N - log2(Pow2)
    // iff N > log2(Pow2)
    if (C.isZero()) {
      auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
      return new ICmpInst(NewPred, Y,
                          ConstantInt::get(SrcTy, DstBits - Pow2->logBase2()));
    }
    // (trunc (Pow2 << Y) to iN) == 2**C --> Y == C - log2(Pow2)
    // (trunc (Pow2 << Y) to iN) != 2**C --> Y != C - log2(Pow2)
    if (C.isPowerOf2())
      return new ICmpInst(
          Pred, Y, ConstantInt::get(SrcTy, C.logBase2() - Pow2->logBase2()));
  }

  if (Cmp.isEquality() && (Trunc->hasOneUse() || Trunc->hasNoUnsignedWrap())) {
    // Canonicalize to a mask and wider compare if the wide type is suitable:
    // (trunc X to i8) == C --> (X & 0xff) == (zext C)
    if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
      Constant *Mask =
          ConstantInt::get(SrcTy, APInt::getLowBitsSet(SrcBits, DstBits));
      Value *And = Trunc->hasNoUnsignedWrap() ? X : Builder.CreateAnd(X, Mask);
      Constant *WideC = ConstantInt::get(SrcTy, C.zext(SrcBits));
      return new ICmpInst(Pred, And, WideC);
    }

    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    KnownBits Known = computeKnownBits(X, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countl_one() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C.zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, NewRHS));
    }
  }

  // Look through truncated right-shift of the sign-bit for a sign-bit check:
  // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0  --> ShOp <  0
  // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1
  Value *ShOp;
  uint64_t ShAmt;
  bool TrueIfSigned;
  if (isSignBitCheck(Pred, C, TrueIfSigned) &&
      match(X, m_Shr(m_Value(ShOp), m_ConstantInt(ShAmt))) &&
      DstBits == SrcBits - ShAmt) {
    return TrueIfSigned ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp,
                                       ConstantInt::getNullValue(SrcTy))
                        : new ICmpInst(ICmpInst::ICMP_SGT, ShOp,
                                       ConstantInt::getAllOnesValue(SrcTy));
  }

  return nullptr;
}
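
// Example: with a one-use trunc and a desirable wide type,
//   icmp eq (trunc i32 %x to i8), 42
// is canonicalized to the wider masked compare
//   %m = and i32 %x, 255
//   %c = icmp eq i32 %m, 42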
1529
1530/// Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
1531/// Fold icmp (trunc nuw/nsw X), (zext/sext Y).
1534 const SimplifyQuery &Q) {
1535 Value *X, *Y;
1536 CmpPredicate Pred;
1537 bool YIsSExt = false;
1538 // Try to match icmp (trunc X), (trunc Y)
1539 if (match(&Cmp, m_ICmp(Pred, m_Trunc(m_Value(X)), m_Trunc(m_Value(Y))))) {
1540 unsigned NoWrapFlags = cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1541 cast<TruncInst>(Cmp.getOperand(1))->getNoWrapKind();
1542 if (Cmp.isSigned()) {
1543 // For signed comparisons, both truncs must be nsw.
1544 if (!(NoWrapFlags & TruncInst::NoSignedWrap))
1545 return nullptr;
1546 } else {
1547 // For unsigned and equality comparisons, either both must be nuw or
1548 // both must be nsw, we don't care which.
1549 if (!NoWrapFlags)
1550 return nullptr;
1551 }
1552
1553 if (X->getType() != Y->getType() &&
1554 (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1555 return nullptr;
1556 if (!isDesirableIntType(X->getType()->getScalarSizeInBits()) &&
1557 isDesirableIntType(Y->getType()->getScalarSizeInBits())) {
1558 std::swap(X, Y);
1559 Pred = Cmp.getSwappedPredicate(Pred);
1560 }
1561 YIsSExt = !(NoWrapFlags & TruncInst::NoUnsignedWrap);
1562 }
1563 // Try to match icmp (trunc nuw X), (zext Y)
1564 else if (!Cmp.isSigned() &&
1565 match(&Cmp, m_c_ICmp(Pred, m_NUWTrunc(m_Value(X)),
1566 m_OneUse(m_ZExt(m_Value(Y)))))) {
1567 // Can fold trunc nuw + zext for unsigned and equality predicates.
1568 }
1569 // Try to match icmp (trunc nsw X), (sext Y)
1570 else if (match(&Cmp, m_c_ICmp(Pred, m_NSWTrunc(m_Value(X)),
1571 m_OneUse(m_ZExtOrSExt(m_Value(Y)))))) {
1572 // Can fold trunc nsw + zext/sext for all predicates.
1573 YIsSExt =
1574 isa<SExtInst>(Cmp.getOperand(0)) || isa<SExtInst>(Cmp.getOperand(1));
1575 } else
1576 return nullptr;
1577
1578 Type *TruncTy = Cmp.getOperand(0)->getType();
1579 unsigned TruncBits = TruncTy->getScalarSizeInBits();
1580
1581 // If this transform will end up changing from desirable types -> undesirable
1582 // types, skip it.
1583 if (isDesirableIntType(TruncBits) &&
1584 !isDesirableIntType(X->getType()->getScalarSizeInBits()))
1585 return nullptr;
1586
1587 Value *NewY = Builder.CreateIntCast(Y, X->getType(), YIsSExt);
1588 return new ICmpInst(Pred, X, NewY);
1589}
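// E.g. with matching nuw flags and an unsigned predicate:
//   icmp ult (trunc nuw i64 %x to i32), (trunc nuw i64 %y to i32)
//     --> icmp ult i64 %x, %y
// Here the CreateIntCast of Y is a no-op because %x and %y already have
// the same type.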
1590
1591/// Fold icmp (xor X, Y), C.
1592Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1593 BinaryOperator *Xor,
1594 const APInt &C) {
1595 if (Instruction *I = foldICmpXorShiftConst(Cmp, Xor, C))
1596 return I;
1597
1598 Value *X = Xor->getOperand(0);
1599 Value *Y = Xor->getOperand(1);
1600 const APInt *XorC;
1601 if (!match(Y, m_APInt(XorC)))
1602 return nullptr;
1603
1604 // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
1605 // fold the xor.
1606 ICmpInst::Predicate Pred = Cmp.getPredicate();
1607 bool TrueIfSigned = false;
1608 if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1609
1610 // If the sign bit of the XorCst is not set, there is no change to
1611 // the operation, just stop using the Xor.
1612 if (!XorC->isNegative())
1613 return replaceOperand(Cmp, 0, X);
1614
1615 // Emit the opposite comparison.
1616 if (TrueIfSigned)
1617 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1618 ConstantInt::getAllOnesValue(X->getType()));
1619 else
1620 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1621 ConstantInt::getNullValue(X->getType()));
1622 }
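// E.g. for i8: (icmp slt (xor %x, -128), 0) --> (icmp sgt %x, -1), since
// xor'ing with the sign mask flips exactly the bit being tested.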
1623
1624 if (Xor->hasOneUse()) {
1625 // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1626 if (!Cmp.isEquality() && XorC->isSignMask()) {
1627 Pred = Cmp.getFlippedSignednessPredicate();
1628 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1629 }
1630
1631 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1632 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1633 Pred = Cmp.getFlippedSignednessPredicate();
1634 Pred = Cmp.getSwappedPredicate(Pred);
1635 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1636 }
1637 }
1638
1639 // Mask constant magic can eliminate an 'xor' with unsigned compares.
1640 if (Pred == ICmpInst::ICMP_UGT) {
1641 // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1642 if (*XorC == ~C && (C + 1).isPowerOf2())
1643 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1644 // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1645 if (*XorC == C && (C + 1).isPowerOf2())
1646 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1647 }
1648 if (Pred == ICmpInst::ICMP_ULT) {
1649 // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1650 if (*XorC == -C && C.isPowerOf2())
1651 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1652 ConstantInt::get(X->getType(), ~C));
1653 // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1654 if (*XorC == C && (-C).isPowerOf2())
1655 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1656 ConstantInt::get(X->getType(), ~C));
1657 }
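// E.g. for i8 with C == 7 (so C + 1 == 8 is a power of 2):
//   (xor %x, -8) u> 7 --> %x u< -8
// The xor result exceeds 7 iff some of the top 5 bits survive the flip,
// i.e. iff the top 5 bits of %x are not all ones.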
1658 return nullptr;
1659}
1660
1661/// For power-of-2 C:
1662/// ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1)
1663/// ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1)
1664Instruction *InstCombinerImpl::foldICmpXorShiftConst(ICmpInst &Cmp,
1665 BinaryOperator *Xor,
1666 const APInt &C) {
1667 CmpInst::Predicate Pred = Cmp.getPredicate();
1668 APInt PowerOf2;
1669 if (Pred == ICmpInst::ICMP_ULT)
1670 PowerOf2 = C;
1671 else if (Pred == ICmpInst::ICMP_UGT && !C.isMaxValue())
1672 PowerOf2 = C + 1;
1673 else
1674 return nullptr;
1675 if (!PowerOf2.isPowerOf2())
1676 return nullptr;
1677 Value *X;
1678 const APInt *ShiftC;
1679 if (!match(Xor, m_OneUse(m_c_Xor(m_Value(X),
1680 m_AShr(m_Deferred(X), m_APInt(ShiftC))))))
1681 return nullptr;
1682 uint64_t Shift = ShiftC->getLimitedValue();
1683 Type *XType = X->getType();
1684 if (Shift == 0 || PowerOf2.isMinSignedValue())
1685 return nullptr;
1686 Value *Add = Builder.CreateAdd(X, ConstantInt::get(XType, PowerOf2));
1687 APInt Bound =
1688 Pred == ICmpInst::ICMP_ULT ? PowerOf2 << 1 : ((PowerOf2 << 1) - 1);
1689 return new ICmpInst(Pred, Add, ConstantInt::get(XType, Bound));
1690}
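// E.g. for i8 with ShiftC == 7 and C == 16:
//   ((%x s>> 7) ^ %x) u< 16 --> (%x + 16) u< 32
// i.e. an "is |x| small" test becomes a single range check on x + 16.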
1691
1692/// Fold icmp (and (sh X, Y), C2), C1.
1693Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1694 BinaryOperator *And,
1695 const APInt &C1,
1696 const APInt &C2) {
1697 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1698 if (!Shift || !Shift->isShift())
1699 return nullptr;
1700
1701 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1702 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1703 // code produced by the clang front-end, for bitfield access.
1704 // This seemingly simple opportunity to fold away a shift turns out to be
1705 // rather complicated. See PR17827 for details.
1706 unsigned ShiftOpcode = Shift->getOpcode();
1707 bool IsShl = ShiftOpcode == Instruction::Shl;
1708 const APInt *C3;
1709 if (match(Shift->getOperand(1), m_APInt(C3))) {
1710 APInt NewAndCst, NewCmpCst;
1711 bool AnyCmpCstBitsShiftedOut;
1712 if (ShiftOpcode == Instruction::Shl) {
1713 // For a left shift, we can fold if the comparison is not signed. We can
1714 // also fold a signed comparison if the mask value and comparison value
1715 // are not negative. These constraints may not be obvious, but we can
1716 // prove that they are correct using an SMT solver.
1717 if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1718 return nullptr;
1719
1720 NewCmpCst = C1.lshr(*C3);
1721 NewAndCst = C2.lshr(*C3);
1722 AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1723 } else if (ShiftOpcode == Instruction::LShr) {
1724 // For a logical right shift, we can fold if the comparison is not signed.
1725 // We can also fold a signed comparison if the shifted mask value and the
1726 // shifted comparison value are not negative. These constraints may not be
1727 // obvious, but we can prove that they are correct using an SMT solver.
1728 NewCmpCst = C1.shl(*C3);
1729 NewAndCst = C2.shl(*C3);
1730 AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1731 if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1732 return nullptr;
1733 } else {
1734 // For an arithmetic shift, check that both constants don't use (in a
1735 // signed sense) the top bits being shifted out.
1736 assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1737 NewCmpCst = C1.shl(*C3);
1738 NewAndCst = C2.shl(*C3);
1739 AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1740 if (NewAndCst.ashr(*C3) != C2)
1741 return nullptr;
1742 }
1743
1744 if (AnyCmpCstBitsShiftedOut) {
1745 // If we shifted bits out, the fold is not going to work out. As a
1746 // special case, check to see if this means that the result is always
1747 // true or false now.
1748 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1749 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1750 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1751 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1752 } else {
1753 Value *NewAnd = Builder.CreateAnd(
1754 Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1755 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1756 ConstantInt::get(And->getType(), NewCmpCst));
1757 }
1758 }
1759
1760 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1761 // preferable because it allows the C2 << Y expression to be hoisted out of a
1762 // loop if Y is invariant and X is not.
1763 if (Shift->hasOneUse() && C1.isZero() && Cmp.isEquality() &&
1764 !Shift->isArithmeticShift() &&
1765 ((!IsShl && C2.isOne()) || !isa<Constant>(Shift->getOperand(0)))) {
1766 // Compute C2 << Y.
1767 Value *NewShift =
1768 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1769 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1770
1771 // Compute X & (C2 << Y).
1772 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1773 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1774 }
1775
1776 return nullptr;
1777}
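// E.g. the bitfield pattern from the lshr case above:
//   ((%x lshr 3) & 7) != 5 --> (%x & 56) != 40
// No comparison bits are shifted out (40 lshr 3 == 5), so the fold holds.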
1778
1779/// Fold icmp (and X, C2), C1.
1780Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1781 BinaryOperator *And,
1782 const APInt &C1) {
1783 bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1784
1785 // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1786 // TODO: We canonicalize to the longer form for scalars because we have
1787 // better analysis/folds for icmp, and codegen may be better with icmp.
1788 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isZero() &&
1789 match(And->getOperand(1), m_One()))
1790 return new TruncInst(And->getOperand(0), Cmp.getType());
1791
1792 const APInt *C2;
1793 Value *X;
1794 if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1795 return nullptr;
1796
1797 // (and X, highmask) s> [0, ~highmask] --> X s> ~highmask
1798 if (Cmp.getPredicate() == ICmpInst::ICMP_SGT && C1.ule(~*C2) &&
1799 C2->isNegatedPowerOf2())
1800 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1801 ConstantInt::get(X->getType(), ~*C2));
1802 // (and X, highmask) s< [1, -highmask] --> X s< -highmask
1803 if (Cmp.getPredicate() == ICmpInst::ICMP_SLT && !C1.isSignMask() &&
1804 (C1 - 1).ule(~*C2) && C2->isNegatedPowerOf2() && !C2->isSignMask())
1805 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1806 ConstantInt::get(X->getType(), -*C2));
1807
1808 // Don't perform the following transforms if the AND has multiple uses
1809 if (!And->hasOneUse())
1810 return nullptr;
1811
1812 if (Cmp.isEquality() && C1.isZero()) {
1813 // Restrict this fold to single-use 'and' (PR10267).
1814 // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1815 if (C2->isSignMask()) {
1816 Constant *Zero = Constant::getNullValue(X->getType());
1817 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1818 return new ICmpInst(NewPred, X, Zero);
1819 }
1820
1821 APInt NewC2 = *C2;
1822 KnownBits Know = computeKnownBits(And->getOperand(0), And);
1823 // Set high zeros of C2 to allow matching negated power-of-2.
1824 NewC2 = *C2 | APInt::getHighBitsSet(C2->getBitWidth(),
1825 Know.countMinLeadingZeros());
1826
1827 // Restrict this fold only for single-use 'and' (PR10267).
1828 // ((%x & C) == 0) --> %x u< (-C) iff (-C) is power of two.
1829 if (NewC2.isNegatedPowerOf2()) {
1830 Constant *NegBOC = ConstantInt::get(And->getType(), -NewC2);
1831 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1832 return new ICmpInst(NewPred, X, NegBOC);
1833 }
1834 }
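// E.g. (%x & -8) == 0 --> %x u< 8: the mask -8 is a negated power of 2,
// so the masked value is zero exactly when %x is below that power of 2.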
1835
1836 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1837 // the input width without changing the value produced, eliminate the cast:
1838 //
1839 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1840 //
1841 // We can do this transformation if the constants do not have their sign bits
1842 // set or if it is an equality comparison. Extending a relational comparison
1843 // when we're checking the sign bit would not work.
1844 Value *W;
1845 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1846 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1847 // TODO: Is this a good transform for vectors? Wider types may reduce
1848 // throughput. Should this transform be limited (even for scalars) by using
1849 // shouldChangeType()?
1850 if (!Cmp.getType()->isVectorTy()) {
1851 Type *WideType = W->getType();
1852 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1853 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1854 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1855 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1856 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1857 }
1858 }
1859
1860 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1861 return I;
1862
1863 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1864 // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1865 //
1866 // iff pred isn't signed
1867 if (!Cmp.isSigned() && C1.isZero() && And->getOperand(0)->hasOneUse() &&
1868 match(And->getOperand(1), m_One())) {
1869 Constant *One = cast<Constant>(And->getOperand(1));
1870 Value *Or = And->getOperand(0);
1871 Value *A, *B, *LShr;
1872 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1873 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1874 unsigned UsesRemoved = 0;
1875 if (And->hasOneUse())
1876 ++UsesRemoved;
1877 if (Or->hasOneUse())
1878 ++UsesRemoved;
1879 if (LShr->hasOneUse())
1880 ++UsesRemoved;
1881
1882 // Compute A & ((1 << B) | 1)
1883 unsigned RequireUsesRemoved = match(B, m_ImmConstant()) ? 1 : 3;
1884 if (UsesRemoved >= RequireUsesRemoved) {
1885 Value *NewOr =
1886 Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1887 /*HasNUW=*/true),
1888 One, Or->getName());
1889 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1890 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1891 }
1892 }
1893 }
1894
1895 // (icmp eq (and (bitcast X to int), ExponentMask), ExponentMask) -->
1896 // llvm.is.fpclass(X, fcInf|fcNan)
1897 // (icmp ne (and (bitcast X to int), ExponentMask), ExponentMask) -->
1898 // llvm.is.fpclass(X, ~(fcInf|fcNan))
1899 // (icmp eq (and (bitcast X to int), ExponentMask), 0) -->
1900 // llvm.is.fpclass(X, fcSubnormal|fcZero)
1901 // (icmp ne (and (bitcast X to int), ExponentMask), 0) -->
1902 // llvm.is.fpclass(X, ~(fcSubnormal|fcZero))
1903 Value *V;
1904 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1905 Attribute::NoImplicitFloat) &&
1906 Cmp.isEquality() &&
1907 match(X, m_OneUse(m_ElementWiseBitCast(m_Value(V))))) {
1908 Type *FPType = V->getType()->getScalarType();
1909 if (FPType->isIEEELikeFPTy() && (C1.isZero() || C1 == *C2)) {
1910 APInt ExponentMask =
1911 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt();
1912 if (*C2 == ExponentMask) {
1913 unsigned Mask = C1.isZero()
1914 ? FPClassTest::fcZero | FPClassTest::fcSubnormal
1915 : FPClassTest::fcNan | FPClassTest::fcInf;
1916 if (isICMP_NE)
1917 Mask = ~Mask & fcAllFlags;
1918 return replaceInstUsesWith(Cmp, Builder.createIsFPClass(V, Mask));
1919 }
1920 }
1921 }
1922
1923 return nullptr;
1924}
1925
1926/// Fold icmp (and X, Y), C.
1927Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1928 BinaryOperator *And,
1929 const APInt &C) {
1930 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1931 return I;
1932
1933 const ICmpInst::Predicate Pred = Cmp.getPredicate();
1934 bool TrueIfNeg;
1935 if (isSignBitCheck(Pred, C, TrueIfNeg)) {
1936 // ((X - 1) & ~X) < 0 --> X == 0
1937 // ((X - 1) & ~X) >= 0 --> X != 0
1938 Value *X;
1939 if (match(And->getOperand(0), m_Add(m_Value(X), m_AllOnes())) &&
1940 match(And->getOperand(1), m_Not(m_Specific(X)))) {
1941 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1942 return new ICmpInst(NewPred, X, ConstantInt::getNullValue(X->getType()));
1943 }
1944 // (X & -X) < 0 --> X == MinSignedC
1945 // (X & -X) > -1 --> X != MinSignedC
1946 if (match(And, m_c_And(m_Neg(m_Value(X)), m_Deferred(X)))) {
1947 Constant *MinSignedC = ConstantInt::get(
1948 X->getType(),
1949 APInt::getSignedMinValue(X->getType()->getScalarSizeInBits()));
1950 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1951 return new ICmpInst(NewPred, X, MinSignedC);
1952 }
1953 }
1954
1955 // TODO: These all require that Y is constant too, so refactor with the above.
1956
1957 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1958 Value *X = And->getOperand(0);
1959 Value *Y = And->getOperand(1);
1960 if (auto *C2 = dyn_cast<ConstantInt>(Y))
1961 if (auto *LI = dyn_cast<LoadInst>(X))
1962 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1963 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(LI, GEP, Cmp, C2))
1964 return Res;
1965
1966 if (!Cmp.isEquality())
1967 return nullptr;
1968
1969 // X & -C == -C -> X u> ~C
1970 // X & -C != -C -> X u<= ~C
1971 // iff C is a power of 2
1972 if (Cmp.getOperand(1) == Y && C.isNegatedPowerOf2()) {
1973 auto NewPred =
1974 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT : CmpInst::ICMP_ULE;
1975 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1976 }
1977
1978 // ((zext i1 X) & Y) == 0 --> !((trunc Y) & X)
1979 // ((zext i1 X) & Y) != 0 --> ((trunc Y) & X)
1980 // ((zext i1 X) & Y) == 1 --> ((trunc Y) & X)
1981 // ((zext i1 X) & Y) != 1 --> !((trunc Y) & X)
1982 if (match(And, m_OneUse(m_c_And(m_OneUse(m_ZExt(m_Value(X))), m_Value(Y)))) &&
1983 X->getType()->isIntOrIntVectorTy(1) && (C.isZero() || C.isOne())) {
1984 Value *TruncY = Builder.CreateTrunc(Y, X->getType());
1985 if (C.isZero() ^ (Pred == CmpInst::ICMP_NE)) {
1986 Value *And = Builder.CreateAnd(TruncY, X);
1987 return BinaryOperator::CreateNot(And);
1988 }
1989 return BinaryOperator::CreateAnd(TruncY, X);
1990 }
1991
1992 // (icmp eq/ne (and (shl -1, X), Y), 0)
1993 // -> (icmp eq/ne (lshr Y, X), 0)
1994 // We could technically handle any C == 0 or (C < 0 && isOdd(C)) but it seems
1995 // highly unlikely the non-zero case will ever show up in code.
1996 if (C.isZero() &&
1997 match(And, m_OneUse(m_c_And(m_OneUse(m_Shl(m_AllOnes(), m_Value(X))),
1998 m_Value(Y))))) {
1999 Value *LShr = Builder.CreateLShr(Y, X);
2000 return new ICmpInst(Pred, LShr, Constant::getNullValue(LShr->getType()));
2001 }
2002
2003 // (icmp eq/ne (and (add A, Addend), Msk), C)
2004 // -> (icmp eq/ne (and A, Msk), (and (sub C, Addend), Msk))
2005 {
2006 Value *A;
2007 const APInt *Addend, *Msk;
2008 if (match(And, m_And(m_OneUse(m_Add(m_Value(A), m_APInt(Addend))),
2009 m_LowBitMask(Msk))) &&
2010 C.ule(*Msk)) {
2011 APInt NewComparand = (C - *Addend) & *Msk;
2012 Value *MaskA = Builder.CreateAnd(A, ConstantInt::get(A->getType(), *Msk));
2013 return new ICmpInst(Pred, MaskA,
2014 ConstantInt::get(MaskA->getType(), NewComparand));
2015 }
2016 }
2017
2018 return nullptr;
2019}
2020
2021/// Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
2022static Value *foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or,
2023 InstCombiner::BuilderTy &Builder) {
2024 // Are we using xors or subs to bitwise check for a pair or pairs of
2025 // (in)equalities? Convert to a shorter form that has more potential to be
2026 // folded even further.
2027 // ((X1 ^/- X2) || (X3 ^/- X4)) == 0 --> (X1 == X2) && (X3 == X4)
2028 // ((X1 ^/- X2) || (X3 ^/- X4)) != 0 --> (X1 != X2) || (X3 != X4)
2029 // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) == 0 -->
2030 // (X1 == X2) && (X3 == X4) && (X5 == X6)
2031 // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) != 0 -->
2032 // (X1 != X2) || (X3 != X4) || (X5 != X6)
2033 SmallVector<std::pair<Value *, Value *>, 2> CmpValues;
2034 SmallVector<Value *, 16> WorkList(1, Or);
2035
2036 while (!WorkList.empty()) {
2037 auto MatchOrOperatorArgument = [&](Value *OrOperatorArgument) {
2038 Value *Lhs, *Rhs;
2039
2040 if (match(OrOperatorArgument,
2041 m_OneUse(m_Xor(m_Value(Lhs), m_Value(Rhs))))) {
2042 CmpValues.emplace_back(Lhs, Rhs);
2043 return;
2044 }
2045
2046 if (match(OrOperatorArgument,
2047 m_OneUse(m_Sub(m_Value(Lhs), m_Value(Rhs))))) {
2048 CmpValues.emplace_back(Lhs, Rhs);
2049 return;
2050 }
2051
2052 WorkList.push_back(OrOperatorArgument);
2053 };
2054
2055 Value *CurrentValue = WorkList.pop_back_val();
2056 Value *OrOperatorLhs, *OrOperatorRhs;
2057
2058 if (!match(CurrentValue,
2059 m_Or(m_Value(OrOperatorLhs), m_Value(OrOperatorRhs)))) {
2060 return nullptr;
2061 }
2062
2063 MatchOrOperatorArgument(OrOperatorRhs);
2064 MatchOrOperatorArgument(OrOperatorLhs);
2065 }
2066
2067 ICmpInst::Predicate Pred = Cmp.getPredicate();
2068 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2069 Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.rbegin()->first,
2070 CmpValues.rbegin()->second);
2071
2072 for (auto It = CmpValues.rbegin() + 1; It != CmpValues.rend(); ++It) {
2073 Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);
2074 LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
2075 }
2076
2077 return LhsCmp;
2078}
2079
2080/// Fold icmp (or X, Y), C.
2081Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
2082 BinaryOperator *Or,
2083 const APInt &C) {
2084 ICmpInst::Predicate Pred = Cmp.getPredicate();
2085 if (C.isOne()) {
2086 // icmp slt signum(V) 1 --> icmp slt V, 1
2087 Value *V = nullptr;
2088 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
2089 return new ICmpInst(ICmpInst::ICMP_SLT, V,
2090 ConstantInt::get(V->getType(), 1));
2091 }
2092
2093 Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
2094
2095 // (icmp eq/ne (or disjoint x, C0), C1)
2096 // -> (icmp eq/ne x, C0^C1)
2097 if (Cmp.isEquality() && match(OrOp1, m_ImmConstant()) &&
2098 cast<PossiblyDisjointInst>(Or)->isDisjoint()) {
2099 Value *NewC =
2100 Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(), C));
2101 return new ICmpInst(Pred, OrOp0, NewC);
2102 }
2103
2104 const APInt *MaskC;
2105 if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
2106 if (*MaskC == C && (C + 1).isPowerOf2()) {
2107 // X | C == C --> X <=u C
2108 // X | C != C --> X >u C
2109 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
2110 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
2111 return new ICmpInst(Pred, OrOp0, OrOp1);
2112 }
2113
2114 // More general: canonicalize 'equality with set bits mask' to
2115 // 'equality with clear bits mask'.
2116 // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
2117 // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
2118 if (Or->hasOneUse()) {
2119 Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
2120 Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
2121 return new ICmpInst(Pred, And, NewC);
2122 }
2123 }
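// E.g. (%x | 3) == 7 --> (%x & -4) == 4: clear the mask bits (~3 == -4)
// and xor them out of the constant (7 ^ 3 == 4).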
2124
2125 // (X | (X-1)) s< 0 --> X s< 1
2126 // (X | (X-1)) s> -1 --> X s> 0
2127 Value *X;
2128 bool TrueIfSigned;
2129 if (isSignBitCheck(Pred, C, TrueIfSigned) &&
2130 match(Or, m_c_Or(m_Add(m_Value(X), m_AllOnes()), m_Deferred(X)))) {
2131 auto NewPred = TrueIfSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGT;
2132 Constant *NewC = ConstantInt::get(X->getType(), TrueIfSigned ? 1 : 0);
2133 return new ICmpInst(NewPred, X, NewC);
2134 }
2135
2136 const APInt *OrC;
2137 // icmp(X | OrC, C) --> icmp(X, 0)
2138 if (C.isNonNegative() && match(Or, m_Or(m_Value(X), m_APInt(OrC)))) {
2139 switch (Pred) {
2140 // X | OrC s< C --> X s< 0 iff OrC s>= C s>= 0
2141 case ICmpInst::ICMP_SLT:
2142 // X | OrC s>= C --> X s>= 0 iff OrC s>= C s>= 0
2143 case ICmpInst::ICMP_SGE:
2144 if (OrC->sge(C))
2145 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2146 break;
2147 // X | OrC s<= C --> X s< 0 iff OrC s> C s>= 0
2148 case ICmpInst::ICMP_SLE:
2149 // X | OrC s> C --> X s>= 0 iff OrC s> C s>= 0
2150 case ICmpInst::ICMP_SGT:
2151 if (OrC->sgt(C))
2152 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), X,
2153 ConstantInt::getNullValue(X->getType()));
2154 break;
2155 default:
2156 break;
2157 }
2158 }
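// E.g. (%x | 8) s< 5 --> %x s< 0: since 8 s>= 5 s>= 0, any non-negative
// %x makes the 'or' at least 8, so only negative %x can compare below 5.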
2159
2160 if (!Cmp.isEquality() || !C.isZero() || !Or->hasOneUse())
2161 return nullptr;
2162
2163 Value *P, *Q;
2164 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
2165 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
2166 // -> and (icmp eq P, null), (icmp eq Q, null).
2167 Value *CmpP =
2168 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
2169 Value *CmpQ =
2170 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
2171 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2172 return BinaryOperator::Create(BOpc, CmpP, CmpQ);
2173 }
2174
2175 if (Value *V = foldICmpOrXorSubChain(Cmp, Or, Builder))
2176 return replaceInstUsesWith(Cmp, V);
2177
2178 return nullptr;
2179}
2180
2181/// Fold icmp (mul X, Y), C.
2182Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
2183 BinaryOperator *Mul,
2184 const APInt &C) {
2185 ICmpInst::Predicate Pred = Cmp.getPredicate();
2186 Type *MulTy = Mul->getType();
2187 Value *X = Mul->getOperand(0);
2188
2189 // If there's no overflow:
2190 // X * X == 0 --> X == 0
2191 // X * X != 0 --> X != 0
2192 if (Cmp.isEquality() && C.isZero() && X == Mul->getOperand(1) &&
2193 (Mul->hasNoUnsignedWrap() || Mul->hasNoSignedWrap()))
2194 return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2195
2196 const APInt *MulC;
2197 if (!match(Mul->getOperand(1), m_APInt(MulC)))
2198 return nullptr;
2199
2200 // If this is a test of the sign bit and the multiply is sign-preserving with
2201 // a constant operand, use the multiply LHS operand instead:
2202 // (X * +MulC) < 0 --> X < 0
2203 // (X * -MulC) < 0 --> X > 0
2204 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
2205 if (MulC->isNegative())
2206 Pred = ICmpInst::getSwappedPredicate(Pred);
2207 return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2208 }
2209
2210 if (MulC->isZero())
2211 return nullptr;
2212
2213 // If the multiply does not wrap or the constant is odd, try to divide the
2214 // compare constant by the multiplication factor.
2215 if (Cmp.isEquality()) {
2216 // (mul nsw X, MulC) eq/ne C --> X eq/ne C /s MulC
2217 if (Mul->hasNoSignedWrap() && C.srem(*MulC).isZero()) {
2218 Constant *NewC = ConstantInt::get(MulTy, C.sdiv(*MulC));
2219 return new ICmpInst(Pred, X, NewC);
2220 }
2221
2222 // C % MulC == 0 is weaker than we could use if MulC is odd, because it is
2223 // correct to transform if MulC * N == C, including overflow. I.e. with i8
2224 // (icmp eq (mul X, 5), 101) -> (icmp eq X, 225) but since 101 % 5 != 0, we
2225 // miss that case.
2226 if (C.urem(*MulC).isZero()) {
2227 // (mul nuw X, MulC) eq/ne C --> X eq/ne C /u MulC
2228 // (mul X, OddC) eq/ne N * C --> X eq/ne N
2229 if ((*MulC & 1).isOne() || Mul->hasNoUnsignedWrap()) {
2230 Constant *NewC = ConstantInt::get(MulTy, C.udiv(*MulC));
2231 return new ICmpInst(Pred, X, NewC);
2232 }
2233 }
2234 }
2235
2236 // With a matching no-overflow guarantee, fold the constants:
2237 // (X * MulC) < C --> X < (C / MulC)
2238 // (X * MulC) > C --> X > (C / MulC)
2239 // TODO: Assert that Pred is not equal to SGE, SLE, UGE, ULE?
2240 Constant *NewC = nullptr;
2241 if (Mul->hasNoSignedWrap() && ICmpInst::isSigned(Pred)) {
2242 // MININT / -1 --> overflow.
2243 if (C.isMinSignedValue() && MulC->isAllOnes())
2244 return nullptr;
2245 if (MulC->isNegative())
2246 Pred = ICmpInst::getSwappedPredicate(Pred);
2247
2248 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2249 NewC = ConstantInt::get(
2250 MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::UP));
2251 } else {
2252 assert((Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_SGT) &&
2253 "Unexpected predicate");
2254 NewC = ConstantInt::get(
2255 MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::DOWN));
2256 }
2257 } else if (Mul->hasNoUnsignedWrap() && ICmpInst::isUnsigned(Pred)) {
2258 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) {
2259 NewC = ConstantInt::get(
2260 MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::UP));
2261 } else {
2262 assert((Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
2263 "Unexpected predicate");
2264 NewC = ConstantInt::get(
2265 MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::DOWN));
2266 }
2267 }
2268
2269 return NewC ? new ICmpInst(Pred, X, NewC) : nullptr;
2270}
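// E.g. (icmp slt (mul nsw i8 %x, 5), 17) --> (icmp slt %x, 4), using the
// rounded-up division RoundingSDiv(17, 5, UP) == 4: x * 5 s< 17 iff
// x s< 3.4, i.e. iff x s< 4 over the integers.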
2271
2272/// Fold icmp (shl nuw C2, Y), C.
2273static Instruction *foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl,
2274 const APInt &C) {
2275 Value *Y;
2276 const APInt *C2;
2277 if (!match(Shl, m_NUWShl(m_APInt(C2), m_Value(Y))))
2278 return nullptr;
2279
2280 Type *ShiftType = Shl->getType();
2281 unsigned TypeBits = C.getBitWidth();
2282 ICmpInst::Predicate Pred = Cmp.getPredicate();
2283 if (Cmp.isUnsigned()) {
2284 if (C2->isZero() || C2->ugt(C))
2285 return nullptr;
2286 APInt Div, Rem;
2287 APInt::udivrem(C, *C2, Div, Rem);
2288 bool CIsPowerOf2 = Rem.isZero() && Div.isPowerOf2();
2289
2290 // (1 << Y) pred C -> Y pred Log2(C)
2291 if (!CIsPowerOf2) {
2292 // (1 << Y) < 30 -> Y <= 4
2293 // (1 << Y) <= 30 -> Y <= 4
2294 // (1 << Y) >= 30 -> Y > 4
2295 // (1 << Y) > 30 -> Y > 4
2296 if (Pred == ICmpInst::ICMP_ULT)
2297 Pred = ICmpInst::ICMP_ULE;
2298 else if (Pred == ICmpInst::ICMP_UGE)
2299 Pred = ICmpInst::ICMP_UGT;
2300 }
2301
2302 unsigned CLog2 = Div.logBase2();
2303 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2304 } else if (Cmp.isSigned() && C2->isOne()) {
2305 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2306 // (1 << Y) > 0 -> Y != 31
2307 // (1 << Y) > C -> Y != 31 if C is negative.
2308 if (Pred == ICmpInst::ICMP_SGT && C.sle(0))
2309 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2310
2311 // (1 << Y) < 0 -> Y == 31
2312 // (1 << Y) < 1 -> Y == 31
2313 // (1 << Y) < C -> Y == 31 if C is negative and not signed min.
2314 // Exclude signed min by subtracting 1 and lower the upper bound to 0.
2315 if (Pred == ICmpInst::ICMP_SLT && (C - 1).sle(0))
2316 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2317 }
2318
2319 return nullptr;
2320}
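// E.g. (icmp ult (shl nuw i8 8, %y), 30) --> (icmp ule %y, 1): udivrem
// gives Div == 3, Rem == 6, and 3 is not a power of 2, so ULT relaxes to
// ULE of log2(3) == 1 (8 << 1 == 16 u< 30, but 8 << 2 == 32 is not).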
2321
2322/// Fold icmp (shl X, Y), C.
2323Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2324 BinaryOperator *Shl,
2325 const APInt &C) {
2326 const APInt *ShiftVal;
2327 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2328 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2329
2330 ICmpInst::Predicate Pred = Cmp.getPredicate();
2331 // (icmp pred (shl nuw&nsw X, Y), Csle0)
2332 // -> (icmp pred X, Csle0)
2333 //
2334 // The idea is that nuw/nsw essentially freeze the sign bit for the shift
2335 // op, so comparing X directly gives the same result.
2336 if (C.sle(0) && Shl->hasNoUnsignedWrap() && Shl->hasNoSignedWrap())
2337 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2338
2339 // (icmp eq/ne (shl nuw|nsw X, Y), 0)
2340 // -> (icmp eq/ne X, 0)
2341 if (ICmpInst::isEquality(Pred) && C.isZero() &&
2342 (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap()))
2343 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2344
2345 // (icmp slt (shl nsw X, Y), 0/1)
2346 // -> (icmp slt X, 0/1)
2347 // (icmp sgt (shl nsw X, Y), 0/-1)
2348 // -> (icmp sgt X, 0/-1)
2349 //
2350 // NB: sge/sle with a constant will canonicalize to sgt/slt.
2351 if (Shl->hasNoSignedWrap() &&
2352 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT))
2353 if (C.isZero() || (Pred == ICmpInst::ICMP_SGT ? C.isAllOnes() : C.isOne()))
2354 return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2355
2356 const APInt *ShiftAmt;
2357 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2358 return foldICmpShlLHSC(Cmp, Shl, C);
2359
2360 // Check that the shift amount is in range. If not, don't perform undefined
2361 // shifts. When the shift is visited, it will be simplified.
2362 unsigned TypeBits = C.getBitWidth();
2363 if (ShiftAmt->uge(TypeBits))
2364 return nullptr;
2365
2366 Value *X = Shl->getOperand(0);
2367 Type *ShType = Shl->getType();
2368
2369 // NSW guarantees that we are only shifting out sign bits from the high bits,
2370 // so we can ASHR the compare constant without needing a mask and eliminate
2371 // the shift.
2372 if (Shl->hasNoSignedWrap()) {
2373 if (Pred == ICmpInst::ICMP_SGT) {
2374 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2375 APInt ShiftedC = C.ashr(*ShiftAmt);
2376 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2377 }
2378 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2379 C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2380 APInt ShiftedC = C.ashr(*ShiftAmt);
2381 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2382 }
2383 if (Pred == ICmpInst::ICMP_SLT) {
2384 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2385 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2386 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2387 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2388 assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2389 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2390 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2391 }
2392 }
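// E.g. (icmp sgt (shl nsw i8 %x, 3), 47) --> (icmp sgt %x, 5): only sign
// bits are shifted out, so 47 ashr 3 == 5 preserves the ordering.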
2393
2394 // NUW guarantees that we are only shifting out zero bits from the high bits,
2395 // so we can LSHR the compare constant without needing a mask and eliminate
2396 // the shift.
2397 if (Shl->hasNoUnsignedWrap()) {
2398 if (Pred == ICmpInst::ICMP_UGT) {
2399 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2400 APInt ShiftedC = C.lshr(*ShiftAmt);
2401 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2402 }
2403 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2404 C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2405 APInt ShiftedC = C.lshr(*ShiftAmt);
2406 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2407 }
2408 if (Pred == ICmpInst::ICMP_ULT) {
2409 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2410 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2411 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2412 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2413 assert(C.ugt(0) && "ult 0 should have been eliminated");
2414 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2415 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2416 }
2417 }
2418
2419 if (Cmp.isEquality() && Shl->hasOneUse()) {
2420 // Strength-reduce the shift into an 'and'.
2421 Constant *Mask = ConstantInt::get(
2422 ShType,
2423 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2424 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2425 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2426 return new ICmpInst(Pred, And, LShrC);
2427 }
2428
2429 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2430 bool TrueIfSigned = false;
2431 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2432 // (X << 31) <s 0 --> (X & 1) != 0
2433 Constant *Mask = ConstantInt::get(
2434 ShType,
2435 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2436 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2437 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2438 And, Constant::getNullValue(ShType));
2439 }
2440
2441 // Simplify 'shl' inequality test into 'and' equality test.
2442 if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2443 // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2444 if ((C + 1).isPowerOf2() &&
2445 (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2446 Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2447 return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2448 : ICmpInst::ICMP_NE,
2449 And, Constant::getNullValue(ShType));
2450 }
2451 // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2452 if (C.isPowerOf2() &&
2453 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2454 Value *And =
2455 Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2456 return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2457 : ICmpInst::ICMP_NE,
2458 And, Constant::getNullValue(ShType));
2459 }
2460 }
2461
2462 // Transform (icmp pred iM (shl iM %v, N), C)
2463 // -> (icmp pred i(M-N) (trunc %v to i(M-N)), (trunc (C>>N) to i(M-N)))
2464 // Do this only if (trunc (C>>N)) loses no bits and i(M-N) is a desirable type.
2465 // This enables us to get rid of the shift in favor of a trunc that may be
2466 // free on the target. It has the additional benefit of comparing to a
2467 // smaller constant that may be more target-friendly.
2468 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2469 if (Shl->hasOneUse() && Amt != 0 &&
2470 shouldChangeType(ShType->getScalarSizeInBits(), TypeBits - Amt)) {
2471 ICmpInst::Predicate CmpPred = Pred;
2472 APInt RHSC = C;
2473
2474 if (RHSC.countr_zero() < Amt && ICmpInst::isStrictPredicate(CmpPred)) {
2475 // Try the flipped strictness predicate.
2476 // e.g.:
2477 // icmp ult i64 (shl X, 32), 8589934593 ->
2478 // icmp ule i64 (shl X, 32), 8589934592 ->
2479 // icmp ule i32 (trunc X, i32), 2 ->
2480 // icmp ult i32 (trunc X, i32), 3
2481 if (auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(
2482 Pred, ConstantInt::get(ShType->getContext(), C))) {
2483 CmpPred = FlippedStrictness->first;
2484 RHSC = cast<ConstantInt>(FlippedStrictness->second)->getValue();
2485 }
2486 }
2487
2488 if (RHSC.countr_zero() >= Amt) {
2489 Type *TruncTy = ShType->getWithNewBitWidth(TypeBits - Amt);
2490 Constant *NewC =
2491 ConstantInt::get(TruncTy, RHSC.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2492 return new ICmpInst(CmpPred,
2493 Builder.CreateTrunc(X, TruncTy, "", /*IsNUW=*/false,
2494 Shl->hasNoSignedWrap()),
2495 NewC);
2496 }
2497 }
2498
2499 return nullptr;
2500}
2501
2502/// Fold icmp ({al}shr X, Y), C.
2503Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2504 BinaryOperator *Shr,
2505 const APInt &C) {
2506 // An exact shr only shifts out zero bits, so:
2507 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2508 Value *X = Shr->getOperand(0);
2509 CmpInst::Predicate Pred = Cmp.getPredicate();
2510 if (Cmp.isEquality() && Shr->isExact() && C.isZero())
2511 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2512
2513 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2514 const APInt *ShiftValC;
2515 if (match(X, m_APInt(ShiftValC))) {
2516 if (Cmp.isEquality())
2517 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftValC);
2518
2519 // (ShiftValC >> Y) >s -1 --> Y != 0 with ShiftValC < 0
2520 // (ShiftValC >> Y) <s 0 --> Y == 0 with ShiftValC < 0
2521 bool TrueIfSigned;
2522 if (!IsAShr && ShiftValC->isNegative() &&
2523 isSignBitCheck(Pred, C, TrueIfSigned))
2524 return new ICmpInst(TrueIfSigned ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE,
2525 Shr->getOperand(1),
2526 ConstantInt::getNullValue(X->getType()));
2527
2528 // If the shifted constant is a power-of-2, test the shift amount directly:
2529 // (ShiftValC >> Y) >u C --> Y <u (LZ(C) - LZ(ShiftValC))
2530 // (ShiftValC >> Y) <u C --> Y >=u (LZ(C-1) - LZ(ShiftValC))
2531 if (!IsAShr && ShiftValC->isPowerOf2() &&
2532 (Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_ULT)) {
2533 bool IsUGT = Pred == CmpInst::ICMP_UGT;
2534 assert(ShiftValC->uge(C) && "Expected simplify of compare");
2535 assert((IsUGT || !C.isZero()) && "Expected X u< 0 to simplify");
2536
2537 unsigned CmpLZ = IsUGT ? C.countl_zero() : (C - 1).countl_zero();
2538 unsigned ShiftLZ = ShiftValC->countl_zero();
2539 Constant *NewC = ConstantInt::get(Shr->getType(), CmpLZ - ShiftLZ);
2540 auto NewPred = IsUGT ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
2541 return new ICmpInst(NewPred, Shr->getOperand(1), NewC);
2542 }
2543 }
2544
2545 const APInt *ShiftAmtC;
2546 if (!match(Shr->getOperand(1), m_APInt(ShiftAmtC)))
2547 return nullptr;
2548
2549 // Check that the shift amount is in range. If not, don't perform undefined
2550 // shifts. When the shift is visited it will be simplified.
2551 unsigned TypeBits = C.getBitWidth();
2552 unsigned ShAmtVal = ShiftAmtC->getLimitedValue(TypeBits);
2553 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2554 return nullptr;
2555
2556 bool IsExact = Shr->isExact();
2557 Type *ShrTy = Shr->getType();
2558 // TODO: If we could guarantee that InstSimplify would handle all of the
2559 // constant-value-based preconditions in the folds below, then we could assert
2560 // those conditions rather than checking them. This is difficult because of
2561 // undef/poison (PR34838).
2562 if (IsAShr && Shr->hasOneUse()) {
2563 if (IsExact && (Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) &&
2564 (C - 1).isPowerOf2() && C.countLeadingZeros() > ShAmtVal) {
2565 // When C - 1 is a power of two and the transform can be legally
2566 // performed, prefer this form so the produced constant is close to a
2567 // power of two.
2568 // icmp slt/ult (ashr exact X, ShAmtC), C
2569 // --> icmp slt/ult X, (C - 1) << ShAmtC) + 1
2570 APInt ShiftedC = (C - 1).shl(ShAmtVal) + 1;
2571 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2572 }
2573 if (IsExact || Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) {
2574 // When ShAmtC can be shifted losslessly:
2575 // icmp PRED (ashr exact X, ShAmtC), C --> icmp PRED X, (C << ShAmtC)
2576 // icmp slt/ult (ashr X, ShAmtC), C --> icmp slt/ult X, (C << ShAmtC)
2577 APInt ShiftedC = C.shl(ShAmtVal);
2578 if (ShiftedC.ashr(ShAmtVal) == C)
2579 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2580 }
2581 if (Pred == CmpInst::ICMP_SGT) {
2582 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2583 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2584 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2585 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2586 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2587 }
2588 if (Pred == CmpInst::ICMP_UGT) {
2589 // icmp ugt (ashr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2590 // 'C + 1 << ShAmtC' can overflow as a signed number, so the 2nd
2591 // clause accounts for that pattern.
2592 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2593 if ((ShiftedC + 1).ashr(ShAmtVal) == (C + 1) ||
2594 (C + 1).shl(ShAmtVal).isMinSignedValue())
2595 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2596 }
2597
2598 // If the compare constant has significant bits above the lowest sign-bit,
2599 // then convert an unsigned cmp to a test of the sign-bit:
2600 // (ashr X, ShiftC) u> C --> X s< 0
2601 // (ashr X, ShiftC) u< C --> X s> -1
2602 if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2603 if (Pred == CmpInst::ICMP_UGT) {
2604 return new ICmpInst(CmpInst::ICMP_SLT, X,
2605 ConstantInt::getNullValue(ShrTy));
2606 }
2607 if (Pred == CmpInst::ICMP_ULT) {
2608 return new ICmpInst(CmpInst::ICMP_SGT, X,
2609 ConstantInt::getAllOnesValue(ShrTy));
2610 }
2611 }
2612 } else if (!IsAShr) {
2613 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2614 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2615 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2616 APInt ShiftedC = C.shl(ShAmtVal);
2617 if (ShiftedC.lshr(ShAmtVal) == C)
2618 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2619 }
2620 if (Pred == CmpInst::ICMP_UGT) {
2621 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2622 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2623 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2624 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2625 }
2626 }
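// E.g. (icmp ult (lshr i8 %x, 2), 5) --> (icmp ult %x, 20): the bound
// 5 << 2 == 20 survives the round-trip (20 lshr 2 == 5), so no bits of
// the bound are lost.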
2627
2628 if (!Cmp.isEquality())
2629 return nullptr;
2630
2631 // Handle equality comparisons of shift-by-constant.
2632
2633 // If the comparison constant changes with the shift, the comparison cannot
2634 // succeed (bits of the comparison constant cannot match the shifted value).
2635 // This should be known by InstSimplify and already be folded to true/false.
2636 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2637 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2638 "Expected icmp+shr simplify did not occur.");
2639
2640 // If the bits shifted out are known zero, compare the unshifted value:
2641 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
2642 if (Shr->isExact())
2643 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2644
2645 if (Shr->hasOneUse()) {
2646 // Canonicalize the shift into an 'and':
2647 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2648 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2649 Constant *Mask = ConstantInt::get(ShrTy, Val);
2650 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2651 return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2652 }
2653
2654 return nullptr;
2655}
2656
2657Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2658 BinaryOperator *SRem,
2659 const APInt &C) {
2660 const ICmpInst::Predicate Pred = Cmp.getPredicate();
2661 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT) {
2662 // Canonicalize unsigned predicates to signed:
2663 // (X s% DivisorC) u> C -> (X s% DivisorC) s< 0
2664 // iff (C s< 0 ? ~C : C) u>= abs(DivisorC)-1
2665 // (X s% DivisorC) u< C+1 -> (X s% DivisorC) s> -1
2666 // iff (C+1 s< 0 ? ~C : C) u>= abs(DivisorC)-1
2667
2668 const APInt *DivisorC;
2669 if (!match(SRem->getOperand(1), m_APInt(DivisorC)))
2670 return nullptr;
2671
2672 APInt NormalizedC = C;
2673 if (Pred == ICmpInst::ICMP_ULT) {
2674 assert(!NormalizedC.isZero() &&
2675 "ult X, 0 should have been simplified already.");
2676 --NormalizedC;
2677 }
2678 if (C.isNegative())
2679 NormalizedC.flipAllBits();
2680 assert(!DivisorC->isZero() &&
2681 "srem X, 0 should have been simplified already.");
2682 if (!NormalizedC.uge(DivisorC->abs() - 1))
2683 return nullptr;
2684
2685 Type *Ty = SRem->getType();
2686 if (Pred == ICmpInst::ICMP_UGT)
2687 return new ICmpInst(ICmpInst::ICMP_SLT, SRem,
2688 ConstantInt::getNullValue(Ty));
2689 return new ICmpInst(ICmpInst::ICMP_SGT, SRem,
2690 ConstantInt::getAllOnesValue(Ty));
2691 }
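// E.g. for i8: (icmp ugt (srem %x, 5), 4) --> (icmp slt (srem %x, 5), 0):
// the remainder is in [-4, 4], so the only values that are u> 4 are the
// negative ones viewed as unsigned.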
2692 // Match an 'is positive' or 'is negative' comparison of remainder by a
2693 // constant power-of-2 value:
2694 // (X % pow2C) sgt/slt 0
2695 if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT &&
2696 Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2697 return nullptr;
2698
2699 // TODO: The one-use check is standard because we do not typically want to
2700 // create longer instruction sequences, but this might be a special-case
2701 // because srem is not good for analysis or codegen.
2702 if (!SRem->hasOneUse())
2703 return nullptr;
2704
2705 const APInt *DivisorC;
2706 if (!match(SRem->getOperand(1), m_Power2(DivisorC)))
2707 return nullptr;
2708
2709 // For cmp_sgt/cmp_slt only zero valued C is handled.
2710 // For cmp_eq/cmp_ne only positive valued C is handled.
2711 if (((Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT) &&
2712 !C.isZero()) ||
2713 ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2714 !C.isStrictlyPositive()))
2715 return nullptr;
2716
2717 // Mask off the sign bit and the modulo bits (low-bits).
2718 Type *Ty = SRem->getType();
2719 APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2720 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2721 Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2722
2723 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)
2724 return new ICmpInst(Pred, And, ConstantInt::get(Ty, C));
2725
2726 // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2727 // bit is set. Example:
2728 // (i8 X % 32) s> 0 --> (X & 159) s> 0
2729 if (Pred == ICmpInst::ICMP_SGT)
2730 return new ICmpInst(ICmpInst::ICMP_SGT, And, Constant::getNullValue(Ty));
2731
2732 // For 'is negative?' check that the sign-bit is set and at least 1 masked
2733 // bit is set. Example:
2734 // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2735 return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2736}
2737
2738/// Fold icmp (udiv X, Y), C.
2739Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2740 BinaryOperator *UDiv,
2741 const APInt &C) {
2742 ICmpInst::Predicate Pred = Cmp.getPredicate();
2743 Value *X = UDiv->getOperand(0);
2744 Value *Y = UDiv->getOperand(1);
2745 Type *Ty = UDiv->getType();
2746
2747 const APInt *C2;
2748 if (!match(X, m_APInt(C2)))
2749 return nullptr;
2750
2751 assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2752
2753 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
2754 if (Pred == ICmpInst::ICMP_UGT) {
2755 assert(!C.isMaxValue() &&
2756 "icmp ugt X, UINT_MAX should have been simplified already.");
2757 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2758 ConstantInt::get(Ty, C2->udiv(C + 1)));
2759 }
2760
2761 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2762 if (Pred == ICmpInst::ICMP_ULT) {
2763 assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2764 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2765 ConstantInt::get(Ty, C2->udiv(C)));
2766 }
2767
2768 return nullptr;
2769}
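// E.g. (icmp ugt (udiv i8 64, %y), 7) --> (icmp ule %y, 8), because
// 64 u/ %y exceeds 7 exactly when %y u<= 64 / (7 + 1) == 8.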
2770
2771/// Fold icmp ({su}div X, Y), C.
2772Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2773 BinaryOperator *Div,
2774 const APInt &C) {
2775 ICmpInst::Predicate Pred = Cmp.getPredicate();
2776 Value *X = Div->getOperand(0);
2777 Value *Y = Div->getOperand(1);
2778 Type *Ty = Div->getType();
2779 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2780
2781 // If unsigned division and the compare constant is bigger than
2782 // UMAX/2 (negative), there's only one pair of values that satisfies an
2783 // equality check, so eliminate the division:
2784 // (X u/ Y) == C --> (X == C) && (Y == 1)
2785 // (X u/ Y) != C --> (X != C) || (Y != 1)
2786 // Similarly, if signed division and the compare constant is exactly SMIN:
2787 // (X s/ Y) == SMIN --> (X == SMIN) && (Y == 1)
2788 // (X s/ Y) != SMIN --> (X != SMIN) || (Y != 1)
2789 if (Cmp.isEquality() && Div->hasOneUse() && C.isSignBitSet() &&
2790 (!DivIsSigned || C.isMinSignedValue())) {
2791 Value *XBig = Builder.CreateICmp(Pred, X, ConstantInt::get(Ty, C));
2792 Value *YOne = Builder.CreateICmp(Pred, Y, ConstantInt::get(Ty, 1));
2793 auto Logic = Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2794 return BinaryOperator::Create(Logic, XBig, YOne);
2795 }
2796
2797 // Fold: icmp pred ([us]div X, C2), C -> range test
2798 // Fold this div into the comparison, producing a range check.
2799 // Determine, based on the divide type, what the range is being
2800 // checked. If there is an overflow on the low or high side, remember
2801 // it, otherwise compute the range [low, hi) bounding the new value.
2802 // See: InsertRangeTest above for the kinds of replacements possible.
2803 const APInt *C2;
2804 if (!match(Y, m_APInt(C2)))
2805 return nullptr;
2806
2807 // FIXME: If the operand types don't match the type of the divide
2808 // then don't attempt this transform. The code below doesn't have the
2809 // logic to deal with a signed divide and an unsigned compare (and
2810 // vice versa). This is because (x /s C2) <s C produces different
2811 // results than (x /s C2) <u C or (x /u C2) <s C or even
2812 // (x /u C2) <u C. Simply casting the operands and result won't
2813 // work. :( The if statement below tests that condition and bails
2814 // if it finds it.
2815 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2816 return nullptr;
2817
2818 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2819 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2820 // division-by-constant cases should be present, we can not assert that they
2821 // have happened before we reach this icmp instruction.
2822 if (C2->isZero() || C2->isOne() || (DivIsSigned && C2->isAllOnes()))
2823 return nullptr;
2824
2825 // Compute Prod = C * C2. We are essentially solving an equation of the
2826 // form X / C2 = C. We solve for X by multiplying C2 and C.
2827 // By solving for X, we can turn this into a range check instead of computing
2828 // a divide.
2829 APInt Prod = C * *C2;
2830
2831 // Determine if the product overflows by seeing if the product is not equal to
2832 // the divide. Make sure we do the same kind of divide as in the LHS
2833 // instruction that we're folding.
2834 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2835
2836 // If the division is known to be exact, then there is no remainder from the
2837 // divide, so the covered range size is unit, otherwise it is the divisor.
2838 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2839
2840 // Figure out the interval that is being checked. For example, a comparison
2841 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2842 // Compute this interval based on the constants involved and the signedness of
2843 // the compare/divide. This computes a half-open interval, keeping track of
2844 // whether either value in the interval overflows. After analysis each
2845 // overflow variable is set to 0 if its corresponding bound variable is valid,
2846 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2847 int LoOverflow = 0, HiOverflow = 0;
2848 APInt LoBound, HiBound;
2849
2850 if (!DivIsSigned) { // udiv
2851 // e.g. X/5 op 3 --> [15, 20)
2852 LoBound = Prod;
2853 HiOverflow = LoOverflow = ProdOV;
2854 if (!HiOverflow) {
2855 // If this is not an exact divide, then many values in the range collapse
2856 // to the same result value.
2857 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2858 }
2859 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2860 if (C.isZero()) { // (X / pos) op 0
2861 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2862 LoBound = -(RangeSize - 1);
2863 HiBound = RangeSize;
2864 } else if (C.isStrictlyPositive()) { // (X / pos) op pos
2865 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2866 HiOverflow = LoOverflow = ProdOV;
2867 if (!HiOverflow)
2868 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2869 } else { // (X / pos) op neg
2870 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
2871 HiBound = Prod + 1;
2872 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2873 if (!LoOverflow) {
2874 APInt DivNeg = -RangeSize;
2875 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2876 }
2877 }
2878 } else if (C2->isNegative()) { // Divisor is < 0.
2879 if (Div->isExact())
2880 RangeSize.negate();
2881 if (C.isZero()) { // (X / neg) op 0
2882 // e.g. X/-5 op 0 --> [-4, 5)
2883 LoBound = RangeSize + 1;
2884 HiBound = -RangeSize;
2885 if (HiBound == *C2) { // -INTMIN = INTMIN
2886 HiOverflow = 1; // [INTMIN+1, overflow)
2887 HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN
2888 }
2889 } else if (C.isStrictlyPositive()) { // (X / neg) op pos
2890 // e.g. X/-5 op 3 --> [-19, -14)
2891 HiBound = Prod + 1;
2892 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2893 if (!LoOverflow)
2894 LoOverflow =
2895 addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2896 } else { // (X / neg) op neg
2897 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
2898 LoOverflow = HiOverflow = ProdOV;
2899 if (!HiOverflow)
2900 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2901 }
2902
2903 // Dividing by a negative swaps the condition. LT <-> GT
2904 Pred = ICmpInst::getSwappedPredicate(Pred);
2905 }
2906
2907 switch (Pred) {
2908 default:
2909 llvm_unreachable("Unhandled icmp predicate!");
2910 case ICmpInst::ICMP_EQ:
2911 if (LoOverflow && HiOverflow)
2912 return replaceInstUsesWith(Cmp, Builder.getFalse());
2913 if (HiOverflow)
2914 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2915 X, ConstantInt::get(Ty, LoBound));
2916 if (LoOverflow)
2917 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2918 X, ConstantInt::get(Ty, HiBound));
2919 return replaceInstUsesWith(
2920 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2921 case ICmpInst::ICMP_NE:
2922 if (LoOverflow && HiOverflow)
2923 return replaceInstUsesWith(Cmp, Builder.getTrue());
2924 if (HiOverflow)
2925 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2926 X, ConstantInt::get(Ty, LoBound));
2927 if (LoOverflow)
2928 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2929 X, ConstantInt::get(Ty, HiBound));
2930 return replaceInstUsesWith(
2931 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, false));
2932 case ICmpInst::ICMP_ULT:
2933 case ICmpInst::ICMP_SLT:
2934 if (LoOverflow == +1) // Low bound is greater than input range.
2935 return replaceInstUsesWith(Cmp, Builder.getTrue());
2936 if (LoOverflow == -1) // Low bound is less than input range.
2937 return replaceInstUsesWith(Cmp, Builder.getFalse());
2938 return new ICmpInst(Pred, X, ConstantInt::get(Ty, LoBound));
2939 case ICmpInst::ICMP_UGT:
2940 case ICmpInst::ICMP_SGT:
2941 if (HiOverflow == +1) // High bound greater than input range.
2942 return replaceInstUsesWith(Cmp, Builder.getFalse());
2943 if (HiOverflow == -1) // High bound less than input range.
2944 return replaceInstUsesWith(Cmp, Builder.getTrue());
2945 if (Pred == ICmpInst::ICMP_UGT)
2946 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, HiBound));
2947 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, HiBound));
2948 }
2949
2950 return nullptr;
2951}
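// E.g. (icmp eq (udiv i8 %x, 5), 3) tests %x against [15, 20), which
// insertRangeTest emits as roughly (add %x, -15) u< 5.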
2952
2953/// Fold icmp (sub X, Y), C.
2954Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2955 BinaryOperator *Sub,
2956 const APInt &C) {
2957 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2958 ICmpInst::Predicate Pred = Cmp.getPredicate();
2959 Type *Ty = Sub->getType();
2960
2961 // (SubC - Y) == C --> Y == (SubC - C)
2962 // (SubC - Y) != C --> Y != (SubC - C)
2963 Constant *SubC;
2964 if (Cmp.isEquality() && match(X, m_ImmConstant(SubC))) {
2965 return new ICmpInst(Pred, Y,
2966 ConstantExpr::getSub(SubC, ConstantInt::get(Ty, C)));
2967 }
2968
2969 // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2970 const APInt *C2;
2971 APInt SubResult;
2972 ICmpInst::Predicate SwappedPred = Cmp.getSwappedPredicate();
2973 bool HasNSW = Sub->hasNoSignedWrap();
2974 bool HasNUW = Sub->hasNoUnsignedWrap();
2975 if (match(X, m_APInt(C2)) &&
2976 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2977 !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2978 return new ICmpInst(SwappedPred, Y, ConstantInt::get(Ty, SubResult));
2979
2980 // X - Y == 0 --> X == Y.
2981 // X - Y != 0 --> X != Y.
2982 // TODO: We allow this with multiple uses as long as the other uses are not
2983 // in phis. The phi use check is guarding against a codegen regression
2984 // for a loop test. If the backend could undo this (and possibly
2985 // subsequent transforms), we would not need this hack.
2986 if (Cmp.isEquality() && C.isZero() &&
2987 none_of((Sub->users()), [](const User *U) { return isa<PHINode>(U); }))
2988 return new ICmpInst(Pred, X, Y);
2989
2990 // The following transforms are only worth it if the only user of the subtract
2991 // is the icmp.
2992 // TODO: This is an artificial restriction for all of the transforms below
2993 // that only need a single replacement icmp. Can these use the phi test
2994 // like the transform above here?
2995 if (!Sub->hasOneUse())
2996 return nullptr;
2997
2998 if (Sub->hasNoSignedWrap()) {
2999 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
3000 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
3001 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3002
3003 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
3004 if (Pred == ICmpInst::ICMP_SGT && C.isZero())
3005 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3006
3007 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
3008 if (Pred == ICmpInst::ICMP_SLT && C.isZero())
3009 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3010
3011 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
3012 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
3013 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3014 }
3015
3016 if (!match(X, m_APInt(C2)))
3017 return nullptr;
3018
3019 // C2 - Y <u C -> (Y | (C - 1)) == C2
3020 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
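 // e.g. (illustrative, i8): C2 = 15, C = 4: (15 - Y) u< 4 --> (Y | 3) == 15,
 // since both sides are true exactly for Y in [12, 15].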
3021 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
3022 (*C2 & (C - 1)) == (C - 1))
3023 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
3024
3025 // C2 - Y >u C -> (Y | C) != C2
3026 // iff C2 & C == C and C + 1 is a power of 2
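 // e.g. (illustrative, i8): C2 = 15, C = 3: (15 - Y) u> 3 --> (Y | 3) != 15.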
3027 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
3028 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
3029
3030 // We have handled special cases that reduce.
3031 // Canonicalize any remaining sub to add as:
3032 // (C2 - Y) > C --> (Y + ~C2) < ~C
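 // This works because ~(C2 - Y) == Y + ~C2 and bitwise-not reverses both the
 // signed and the unsigned order, e.g. (illustrative, i8):
 // (10 - Y) s> 3 --> (Y + -11) s< -4.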
3033 Value *Add = Builder.CreateAdd(Y, ConstantInt::get(Ty, ~(*C2)), "notsub",
3034 HasNUW, HasNSW);
3035 return new ICmpInst(SwappedPred, Add, ConstantInt::get(Ty, ~C));
3036}
3037
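/// (Added note.) Table encodes a truth table over the two i1 inputs: bit
/// (Op0 << 1 | Op1) of Table holds the desired result for that input pair,
/// and the four-digit case comments below list Table[3] down to Table[0].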
3038static Value *createLogicFromTable(const std::bitset<4> &Table, Value *Op0,
3039 Value *Op1, IRBuilderBase &Builder,
3040 bool HasOneUse) {
3041 auto FoldConstant = [&](bool Val) {
3042 Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
3043 if (Op0->getType()->isVectorTy())
3044 Res = ConstantVector::getSplat(
3045 cast<VectorType>(Op0->getType())->getElementCount(), Res);
3046 return Res;
3047 };
3048
3049 switch (Table.to_ulong()) {
3050 case 0: // 0 0 0 0
3051 return FoldConstant(false);
3052 case 1: // 0 0 0 1
3053 return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) : nullptr;
3054 case 2: // 0 0 1 0
3055 return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) : nullptr;
3056 case 3: // 0 0 1 1
3057 return Builder.CreateNot(Op0);
3058 case 4: // 0 1 0 0
3059 return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) : nullptr;
3060 case 5: // 0 1 0 1
3061 return Builder.CreateNot(Op1);
3062 case 6: // 0 1 1 0
3063 return Builder.CreateXor(Op0, Op1);
3064 case 7: // 0 1 1 1
3065 return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) : nullptr;
3066 case 8: // 1 0 0 0
3067 return Builder.CreateAnd(Op0, Op1);
3068 case 9: // 1 0 0 1
3069 return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) : nullptr;
3070 case 10: // 1 0 1 0
3071 return Op1;
3072 case 11: // 1 0 1 1
3073 return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) : nullptr;
3074 case 12: // 1 1 0 0
3075 return Op0;
3076 case 13: // 1 1 0 1
3077 return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) : nullptr;
3078 case 14: // 1 1 1 0
3079 return Builder.CreateOr(Op0, Op1);
3080 case 15: // 1 1 1 1
3081 return FoldConstant(true);
3082 default:
3083 llvm_unreachable("Invalid Operation");
3084 }
3085 return nullptr;
3086}
3087
3088Instruction *InstCombinerImpl::foldICmpBinOpWithConstantViaTruthTable(
3089 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3090 Value *A, *B;
3091 Constant *C1, *C2, *C3, *C4;
3092 if (!(match(BO->getOperand(0),
3093 m_Select(m_Value(A), m_Constant(C1), m_Constant(C2)))) ||
3094 !match(BO->getOperand(1),
3095 m_Select(m_Value(B), m_Constant(C3), m_Constant(C4))) ||
3096 Cmp.getType() != A->getType())
3097 return nullptr;
3098
3099 std::bitset<4> Table;
3100 auto ComputeTable = [&](bool First, bool Second) -> std::optional<bool> {
3101 Constant *L = First ? C1 : C2;
3102 Constant *R = Second ? C3 : C4;
3103 if (auto *Res = ConstantFoldBinaryOpOperands(BO->getOpcode(), L, R, DL)) {
3104 auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
3105 if (auto *CI = dyn_cast_or_null<ConstantInt>(Val))
3106 return ICmpInst::compare(CI->getValue(), C, Cmp.getPredicate());
3107 }
3108 return std::nullopt;
3109 };
3110
3111 for (unsigned I = 0; I < 4; ++I) {
3112 bool First = (I >> 1) & 1;
3113 bool Second = I & 1;
3114 if (auto Res = ComputeTable(First, Second))
3115 Table[I] = *Res;
3116 else
3117 return nullptr;
3118 }
3119
3120 // Synthesize optimal logic.
3121 if (auto *Cond = createLogicFromTable(Table, A, B, Builder, BO->hasOneUse()))
3122 return replaceInstUsesWith(Cmp, Cond);
3123 return nullptr;
3124}
3125
3126/// Fold icmp (add X, Y), C.
3127Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
3128 BinaryOperator *Add,
3129 const APInt &C) {
3130 Value *Y = Add->getOperand(1);
3131 Value *X = Add->getOperand(0);
3132
3133 Value *Op0, *Op1;
3134 Instruction *Ext0, *Ext1;
3135 const CmpPredicate Pred = Cmp.getCmpPredicate();
3136 if (match(Add,
3137 m_Add(m_CombineAnd(m_Instruction(Ext0), m_ZExtOrSExt(m_Value(Op0))),
3138 m_CombineAnd(m_Instruction(Ext1),
3139 m_ZExtOrSExt(m_Value(Op1))))) &&
3140 Op0->getType()->isIntOrIntVectorTy(1) &&
3141 Op1->getType()->isIntOrIntVectorTy(1)) {
3142 unsigned BW = C.getBitWidth();
3143 std::bitset<4> Table;
3144 auto ComputeTable = [&](bool Op0Val, bool Op1Val) {
3145 APInt Res(BW, 0);
3146 if (Op0Val)
3147 Res += APInt(BW, isa<ZExtInst>(Ext0) ? 1 : -1, /*isSigned=*/true);
3148 if (Op1Val)
3149 Res += APInt(BW, isa<ZExtInst>(Ext1) ? 1 : -1, /*isSigned=*/true);
3150 return ICmpInst::compare(Res, C, Pred);
3151 };
3152
3153 Table[0] = ComputeTable(false, false);
3154 Table[1] = ComputeTable(false, true);
3155 Table[2] = ComputeTable(true, false);
3156 Table[3] = ComputeTable(true, true);
3157 if (auto *Cond =
3158 createLogicFromTable(Table, Op0, Op1, Builder, Add->hasOneUse()))
3159 return replaceInstUsesWith(Cmp, Cond);
3160 }
3161 const APInt *C2;
3162 if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
3163 return nullptr;
3164
3165 // Fold icmp pred (add X, C2), C.
3166 Type *Ty = Add->getType();
3167
3168 // If the add does not wrap, we can always adjust the compare by subtracting
3169 // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
3170 // have been canonicalized to SGT/SLT/UGT/ULT.
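 // e.g. (illustrative): icmp ult (add nuw i8 %x, 5), 9 --> icmp ult i8 %x, 4.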
3171 if (Add->hasNoUnsignedWrap() &&
3172 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT)) {
3173 bool Overflow;
3174 APInt NewC = C.usub_ov(*C2, Overflow);
3175 // If there is overflow, the result must be true or false.
3176 if (!Overflow)
3177 // icmp Pred (add nuw X, C2), C --> icmp Pred X, (C - C2)
3178 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
3179 }
3180
3181 CmpInst::Predicate ChosenPred = Pred.getPreferredSignedPredicate();
3182
3183 if (Add->hasNoSignedWrap() &&
3184 (ChosenPred == ICmpInst::ICMP_SGT || ChosenPred == ICmpInst::ICMP_SLT)) {
3185 bool Overflow;
3186 APInt NewC = C.ssub_ov(*C2, Overflow);
3187 if (!Overflow)
3188 // icmp samesign ugt/ult (add nsw X, C2), C
3189 // -> icmp sgt/slt X, (C - C2)
3190 return new ICmpInst(ChosenPred, X, ConstantInt::get(Ty, NewC));
3191 }
3192
3193 if (ICmpInst::isUnsigned(Pred) && Add->hasNoSignedWrap() &&
3194 C.isNonNegative() && (C - *C2).isNonNegative() &&
3195 computeConstantRange(X, /*ForSigned=*/true).add(*C2).isAllNonNegative())
3196 return new ICmpInst(ICmpInst::getSignedPredicate(Pred), X,
3197 ConstantInt::get(Ty, C - *C2));
3198
3199 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
3200 const APInt &Upper = CR.getUpper();
3201 const APInt &Lower = CR.getLower();
3202 if (Cmp.isSigned()) {
3203 if (Lower.isSignMask())
3204 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
3205 if (Upper.isSignMask())
3206 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
3207 } else {
3208 if (Lower.isMinValue())
3209 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
3210 if (Upper.isMinValue())
3211 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
3212 }
3213
3214 // This set of folds is intentionally placed after folds that use no-wrapping
3215 // flags because those folds are likely better for later analysis/codegen.
3216 const APInt SMax = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
3217 const APInt SMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
3218
3219 // Fold compare with offset to opposite sign compare if it eliminates offset:
3220 // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
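 // e.g. (illustrative, i8): C2 = 1, C = 1 + 127 = 128: (X + 1) u> 128 --> X s< -1.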
3221 if (Pred == CmpInst::ICMP_UGT && C == *C2 + SMax)
3222 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
3223
3224 // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
3225 if (Pred == CmpInst::ICMP_ULT && C == *C2 + SMin)
3226 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
3227
3228 // (X + C2) >s C --> X <u (SMAX - C) (if C == C2 - 1)
3229 if (Pred == CmpInst::ICMP_SGT && C == *C2 - 1)
3230 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, SMax - C));
3231
3232 // (X + C2) <s C --> X >u (C ^ SMAX) (if C == C2)
3233 if (Pred == CmpInst::ICMP_SLT && C == *C2)
3234 return new ICmpInst(ICmpInst::ICMP_UGT, X, ConstantInt::get(Ty, C ^ SMax));
3235
3236 // (X + -1) <u C --> X u<= C (if X is known non-zero)
3237 if (Pred == CmpInst::ICMP_ULT && C2->isAllOnes()) {
3238 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3239 if (llvm::isKnownNonZero(X, Q))
3240 return new ICmpInst(ICmpInst::ICMP_ULE, X, ConstantInt::get(Ty, C));
3241 }
3242
3243 if (!Add->hasOneUse())
3244 return nullptr;
3245
3246 // X+C2 <u C -> (X & -C) == -C2
3247 // iff C2 & (C-1) == 0
3248 // C is a power of 2
3249 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
3250 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
3251 ConstantInt::get(Ty, -(*C2)));
3252
3253 // X+C2 <u C -> (X & C) != 2C
3254 // iff C == -(C2)
3255 // C2 is a power of 2
3256 if (Pred == ICmpInst::ICMP_ULT && C2->isPowerOf2() && C == -*C2)
3257 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, C),
3258 ConstantInt::get(Ty, C * 2));
3259
3260 // X+C2 >u C -> (X & ~C) != -C2
3261 // iff C2 & C == 0
3262 // C+1 is a power of 2
3263 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
3264 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
3265 ConstantInt::get(Ty, -(*C2)));
3266
3267 // The range test idiom can use either ult or ugt. Arbitrarily canonicalize
3268 // to the ult form.
3269 // X+C2 >u C -> X+(C2-C-1) <u ~C
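 // e.g. (illustrative, i8): (X + 10) u> 5 --> (X + 4) u< -6, i.e. u< 250.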
3270 if (Pred == ICmpInst::ICMP_UGT)
3271 return new ICmpInst(ICmpInst::ICMP_ULT,
3272 Builder.CreateAdd(X, ConstantInt::get(Ty, *C2 - C - 1)),
3273 ConstantInt::get(Ty, ~C));
3274
3275 // zext(V) + C2 pred C -> V + C3 pred' C4
3276 Value *V;
3277 if (match(X, m_ZExt(m_Value(V)))) {
3278 Type *NewCmpTy = V->getType();
3279 unsigned NewCmpBW = NewCmpTy->getScalarSizeInBits();
3280 if (shouldChangeType(Ty, NewCmpTy)) {
3281 ConstantRange SrcCR = CR.truncate(NewCmpBW, TruncInst::NoUnsignedWrap);
3282 CmpInst::Predicate EquivPred;
3283 APInt EquivInt;
3284 APInt EquivOffset;
3285
3286 SrcCR.getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
3287 return new ICmpInst(
3288 EquivPred,
3289 EquivOffset.isZero()
3290 ? V
3291 : Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
3292 ConstantInt::get(NewCmpTy, EquivInt));
3293 }
3294 }
3295
3296 return nullptr;
3297}
3298
3299static bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
3300 Value *&RHS, ConstantInt *&Less,
3301 ConstantInt *&Equal,
3302 ConstantInt *&Greater) {
3303 // TODO: Generalize this to work with other comparison idioms or ensure
3304 // they get canonicalized into this form.
3305
3306 // select i1 (a == b),
3307 // i32 Equal,
3308 // i32 (select i1 (a < b), i32 Less, i32 Greater)
3309 // where Equal, Less and Greater are placeholders for any three constants.
3310 CmpPredicate PredA;
3311 if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
3312 !ICmpInst::isEquality(PredA))
3313 return false;
3314 Value *EqualVal = SI->getTrueValue();
3315 Value *UnequalVal = SI->getFalseValue();
3316 // We can still get a non-canonical predicate here, so canonicalize.
3317 if (PredA == ICmpInst::ICMP_NE)
3318 std::swap(EqualVal, UnequalVal);
3319 if (!match(EqualVal, m_ConstantInt(Equal)))
3320 return false;
3321 CmpPredicate PredB;
3322 Value *LHS2, *RHS2;
3323 if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
3324 m_ConstantInt(Less), m_ConstantInt(Greater))))
3325 return false;
3326 // We can get predicate mismatch here, so canonicalize if possible:
3327 // First, ensure that 'LHS' matches.
3328 if (LHS2 != LHS) {
3329 // x sgt y <--> y slt x
3330 std::swap(LHS2, RHS2);
3331 PredB = ICmpInst::getSwappedPredicate(PredB);
3332 }
3333 if (LHS2 != LHS)
3334 return false;
3335 // We also need to canonicalize 'RHS'.
3336 if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
3337 // x sgt C-1 <--> x sge C <--> not(x slt C)
3338 auto FlippedStrictness =
3339 getFlippedStrictnessPredicateAndConstant(PredB, cast<Constant>(RHS2));
3340 if (!FlippedStrictness)
3341 return false;
3342 assert(FlippedStrictness->first == ICmpInst::ICMP_SGE &&
3343 "basic correctness failure");
3344 RHS2 = FlippedStrictness->second;
3345 // And kind-of perform the result swap.
3346 std::swap(Less, Greater);
3347 PredB = ICmpInst::ICMP_SLT;
3348 }
3349 return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
3350}
3351
3352Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
3353 SelectInst *Select,
3354 ConstantInt *C) {
3355
3356 assert(C && "Cmp RHS should be a constant int!");
3357 // If we're testing a constant value against the result of a three way
3358 // comparison, the result can be expressed directly in terms of the
3359 // original values being compared. Note: We could possibly be more
3360 // aggressive here and remove the hasOneUse test. The original select is
3361 // really likely to simplify or sink when we remove a test of the result.
3362 Value *OrigLHS, *OrigRHS;
3363 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3364 if (Cmp.hasOneUse() &&
3365 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
3366 C3GreaterThan)) {
3367 assert(C1LessThan && C2Equal && C3GreaterThan);
3368
3369 bool TrueWhenLessThan = ICmpInst::compare(
3370 C1LessThan->getValue(), C->getValue(), Cmp.getPredicate());
3371 bool TrueWhenEqual = ICmpInst::compare(C2Equal->getValue(), C->getValue(),
3372 Cmp.getPredicate());
3373 bool TrueWhenGreaterThan = ICmpInst::compare(
3374 C3GreaterThan->getValue(), C->getValue(), Cmp.getPredicate());
3375
3376 // This generates the new instruction that will replace the original Cmp
3377 // Instruction. Instead of enumerating the various combinations when
3378 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
3379 // false, we rely on chaining of ORs and future passes of InstCombine to
3380 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
3381
3382 // When none of the three constants satisfy the predicate for the RHS (C),
3383 // the entire original Cmp can be simplified to a false.
3384 Value *Cond = Builder.getFalse();
3385 if (TrueWhenLessThan)
3386 Cond = Builder.CreateOr(
3387 Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
3388 if (TrueWhenEqual)
3389 Cond = Builder.CreateOr(
3390 Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
3391 if (TrueWhenGreaterThan)
3392 Cond = Builder.CreateOr(
3393 Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
3394
3395 return replaceInstUsesWith(Cmp, Cond);
3396 }
3397 return nullptr;
3398}
3399
3400Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
3401 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
3402 if (!Bitcast)
3403 return nullptr;
3404
3405 ICmpInst::Predicate Pred = Cmp.getPredicate();
3406 Value *Op1 = Cmp.getOperand(1);
3407 Value *BCSrcOp = Bitcast->getOperand(0);
3408 Type *SrcType = Bitcast->getSrcTy();
3409 Type *DstType = Bitcast->getType();
3410
3411 // Make sure the bitcast doesn't change between scalar and vector and
3412 // doesn't change the number of vector elements.
3413 if (SrcType->isVectorTy() == DstType->isVectorTy() &&
3414 SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
3415 // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
3416 Value *X;
3417 if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
3418 // icmp eq (bitcast (sitofp X)), 0 --> icmp eq X, 0
3419 // icmp ne (bitcast (sitofp X)), 0 --> icmp ne X, 0
3420 // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
3421 // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
3422 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
3423 Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
3424 match(Op1, m_Zero()))
3425 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3426
3427 // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
3428 if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
3429 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
3430
3431 // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
3432 if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
3433 return new ICmpInst(Pred, X,
3434 ConstantInt::getAllOnesValue(X->getType()));
3435 }
3436
3437 // Zero-equality checks are preserved through unsigned floating-point casts:
3438 // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
3439 // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
3440 if (match(BCSrcOp, m_UIToFP(m_Value(X))))
3441 if (Cmp.isEquality() && match(Op1, m_Zero()))
3442 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3443
3444 const APInt *C;
3445 bool TrueIfSigned;
3446 if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse()) {
3447 // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
3448 // the FP extend/truncate because that cast does not change the sign-bit.
3449 // This is true for all standard IEEE-754 types and the X86 80-bit type.
3450 // The sign-bit is always the most significant bit in those types.
3451 if (isSignBitCheck(Pred, *C, TrueIfSigned) &&
3452 (match(BCSrcOp, m_FPExt(m_Value(X))) ||
3453 match(BCSrcOp, m_FPTrunc(m_Value(X))))) {
3454 // (bitcast (fpext/fptrunc X)) to iX) < 0 --> (bitcast X to iY) < 0
3455 // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1
3456 Type *XType = X->getType();
3457
3458 // We can't currently handle Power style floating point operations here.
3459 if (!(XType->isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3460 Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
3461 if (auto *XVTy = dyn_cast<VectorType>(XType))
3462 NewType = VectorType::get(NewType, XVTy->getElementCount());
3463 Value *NewBitcast = Builder.CreateBitCast(X, NewType);
3464 if (TrueIfSigned)
3465 return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
3466 ConstantInt::getNullValue(NewType));
3467 else
3468 return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
3469 ConstantInt::getAllOnesValue(NewType));
3470 }
3471 }
3472
3473 // icmp eq/ne (bitcast X to int), special fp -> llvm.is.fpclass(X, class)
3474 Type *FPType = SrcType->getScalarType();
3475 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3476 Attribute::NoImplicitFloat) &&
3477 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3478 FPClassTest Mask = APFloat(FPType->getFltSemantics(), *C).classify();
3479 if (Mask & (fcInf | fcZero)) {
3480 if (Pred == ICmpInst::ICMP_NE)
3481 Mask = ~Mask;
3482 return replaceInstUsesWith(Cmp,
3483 Builder.createIsFPClass(BCSrcOp, Mask));
3484 }
3485 }
3486 }
3487 }
3488
3489 const APInt *C;
3490 if (!match(Cmp.getOperand(1), m_APInt(C)) || !DstType->isIntegerTy() ||
3491 !SrcType->isIntOrIntVectorTy())
3492 return nullptr;
3493
3494 // If this is checking if all elements of a vector compare are set or not,
3495 // invert the casted vector equality compare and test if all compare
3496 // elements are clear or not. Compare against zero is generally easier for
3497 // analysis and codegen.
3498 // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
3499 // Example: are all elements equal? --> are zero elements not equal?
3500 // TODO: Try harder to reduce compare of 2 freely invertible operands?
3501 if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse()) {
3502 if (Value *NotBCSrcOp =
3503 getFreelyInverted(BCSrcOp, BCSrcOp->hasOneUse(), &Builder)) {
3504 Value *Cast = Builder.CreateBitCast(NotBCSrcOp, DstType);
3505 return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
3506 }
3507 }
3508
3509 // If this is checking if all elements of an extended vector are clear or not,
3510 // compare in a narrow type to eliminate the extend:
3511 // icmp eq/ne (bitcast (ext X) to iN), 0 --> icmp eq/ne (bitcast X to iM), 0
3512 Value *X;
3513 if (Cmp.isEquality() && C->isZero() && Bitcast->hasOneUse() &&
3514 match(BCSrcOp, m_ZExtOrSExt(m_Value(X)))) {
3515 if (auto *VecTy = dyn_cast<FixedVectorType>(X->getType())) {
3516 Type *NewType = Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3517 Value *NewCast = Builder.CreateBitCast(X, NewType);
3518 return new ICmpInst(Pred, NewCast, ConstantInt::getNullValue(NewType));
3519 }
3520 }
3521
3522 // Folding: icmp <pred> iN X, C
3523 // where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN
3524 // and C is a splat of a K-bit pattern
3525 // and SC is a constant vector = <C', C', C', ..., C'>
3526 // Into:
3527 // %E = extractelement <M x iK> %vec, i32 C'
3528 // icmp <pred> iK %E, trunc(C)
3529 Value *Vec;
3530 ArrayRef<int> Mask;
3531 if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
3532 // Check whether every element of Mask is the same constant
3533 if (all_equal(Mask)) {
3534 auto *VecTy = cast<VectorType>(SrcType);
3535 auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3536 if (C->isSplat(EltTy->getBitWidth())) {
3537 // Fold the icmp based on the value of C
3538 // If C is M copies of an iK sized bit pattern,
3539 // then:
3540 // => %E = extractelement <N x iK> %vec, i32 Elem
3541 // icmp <pred> iK %SplatVal, <pattern>
3542 Value *Elem = Builder.getInt32(Mask[0]);
3543 Value *Extract = Builder.CreateExtractElement(Vec, Elem);
3544 Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
3545 return new ICmpInst(Pred, Extract, NewC);
3546 }
3547 }
3548 }
3549 return nullptr;
3550}
3551
3552/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3553/// where X is some kind of instruction.
3554Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
3555 const APInt *C;
3556
3557 if (match(Cmp.getOperand(1), m_APInt(C))) {
3558 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3559 if (Instruction *I = foldICmpBinOpWithConstant(Cmp, BO, *C))
3560 return I;
3561
3562 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3563 // For now, we only support constant integers while folding the
3564 // ICMP(SELECT) pattern. We can extend this to support vectors of integers
3565 // similar to the cases handled by binary ops above.
3566 if (auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3567 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
3568 return I;
3569
3570 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3571 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
3572 return I;
3573
3574 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3575 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
3576 return I;
3577
3578 // (extractval ([s/u]subo X, Y), 0) == 0 --> X == Y
3579 // (extractval ([s/u]subo X, Y), 0) != 0 --> X != Y
3580 // TODO: This checks one-use, but that is not strictly necessary.
3581 Value *Cmp0 = Cmp.getOperand(0);
3582 Value *X, *Y;
3583 if (C->isZero() && Cmp.isEquality() && Cmp0->hasOneUse() &&
3584 (match(Cmp0,
3585 m_ExtractValue<0>(m_Intrinsic<Intrinsic::ssub_with_overflow>(
3586 m_Value(X), m_Value(Y)))) ||
3587 match(Cmp0,
3588 m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
3589 m_Value(X), m_Value(Y))))))
3590 return new ICmpInst(Cmp.getPredicate(), X, Y);
3591 }
3592
3593 if (match(Cmp.getOperand(1), m_APIntAllowPoison(C)))
3594 return foldICmpInstWithConstantAllowPoison(Cmp, *C);
3595
3596 return nullptr;
3597}
3598
3599/// Fold an icmp equality instruction with binary operator LHS and constant RHS:
3600/// icmp eq/ne BO, C.
3601Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
3602 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3603 // TODO: Some of these folds could work with arbitrary constants, but this
3604 // function is limited to scalar and vector splat constants.
3605 if (!Cmp.isEquality())
3606 return nullptr;
3607
3608 ICmpInst::Predicate Pred = Cmp.getPredicate();
3609 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
3610 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
3611 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
3612
3613 switch (BO->getOpcode()) {
3614 case Instruction::SRem:
3615 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
3616 if (C.isZero() && BO->hasOneUse()) {
3617 const APInt *BOC;
3618 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
3619 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
3620 return new ICmpInst(Pred, NewRem,
3621 Constant::getNullValue(BO->getType()));
3622 }
3623 }
3624 break;
3625 case Instruction::Add: {
3626 // (A + C2) == C --> A == (C - C2)
3627 // (A + C2) != C --> A != (C - C2)
3628 // TODO: Remove the one-use limitation? See discussion in D58633.
3629 if (Constant *C2 = dyn_cast<Constant>(BOp1)) {
3630 if (BO->hasOneUse())
3631 return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, C2));
3632 } else if (C.isZero()) {
3633 // Replace ((add A, B) != 0) with (A != -B) if A or B is
3634 // efficiently invertible, or if the add has just this one use.
3635 if (Value *NegVal = dyn_castNegVal(BOp1))
3636 return new ICmpInst(Pred, BOp0, NegVal);
3637 if (Value *NegVal = dyn_castNegVal(BOp0))
3638 return new ICmpInst(Pred, NegVal, BOp1);
3639 if (BO->hasOneUse()) {
3640 // (add nuw A, B) != 0 -> (or A, B) != 0
3641 if (match(BO, m_NUWAdd(m_Value(), m_Value()))) {
3642 Value *Or = Builder.CreateOr(BOp0, BOp1);
3643 return new ICmpInst(Pred, Or, Constant::getNullValue(BO->getType()));
3644 }
3645 Value *Neg = Builder.CreateNeg(BOp1);
3646 Neg->takeName(BO);
3647 return new ICmpInst(Pred, BOp0, Neg);
3648 }
3649 }
3650 break;
3651 }
3652 case Instruction::Xor:
3653 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3654 // For the xor case, we can xor two constants together, eliminating
3655 // the explicit xor.
3656 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3657 } else if (C.isZero()) {
3658 // Replace ((xor A, B) != 0) with (A != B)
3659 return new ICmpInst(Pred, BOp0, BOp1);
3660 }
3661 break;
3662 case Instruction::Or: {
3663 const APInt *BOC;
3664 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3665 // Comparing if all bits outside of a constant mask are set?
3666 // Replace (X | C) == -1 with (X & ~C) == ~C.
3667 // This removes the -1 constant.
3668 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3669 Value *And = Builder.CreateAnd(BOp0, NotBOC);
3670 return new ICmpInst(Pred, And, NotBOC);
3671 }
3672 // (icmp eq (or (select cond, 0, NonZero), Other), 0)
3673 // -> (and cond, (icmp eq Other, 0))
3674 // (icmp ne (or (select cond, NonZero, 0), Other), 0)
3675 // -> (or cond, (icmp ne Other, 0))
3676 Value *Cond, *TV, *FV, *Other, *Sel;
3677 if (C.isZero() &&
3678 match(BO,
3679 m_c_Or(m_CombineAnd(m_Value(Sel),
3680 m_Select(m_Value(Cond), m_Value(TV),
3681 m_Value(FV))),
3682 m_Value(Other))) &&
3683 Cond->getType() == Cmp.getType()) {
3684 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3685 // Easy case is if eq/ne matches whether 0 is trueval/falseval.
3686 if (Pred == ICmpInst::ICMP_EQ
3687 ? (match(TV, m_Zero()) && isKnownNonZero(FV, Q))
3688 : (match(FV, m_Zero()) && isKnownNonZero(TV, Q))) {
3689 Value *Cmp = Builder.CreateICmp(
3690 Pred, Other, Constant::getNullValue(Other->getType()));
3691 return BinaryOperator::Create(
3692 Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3693 Cond);
3694 }
3695 // Harder case is if eq/ne matches whether 0 is falseval/trueval. In this
3696 // case we need to invert the select condition so we need to be careful to
3697 // avoid creating extra instructions.
3698 // (icmp ne (or (select cond, 0, NonZero), Other), 0)
3699 // -> (or (not cond), (icmp ne Other, 0))
3700 // (icmp eq (or (select cond, NonZero, 0), Other), 0)
3701 // -> (and (not cond), (icmp eq Other, 0))
3702 //
3703 // Only do this if the inner select has one use, in which case we are
3704 // replacing `select` with `(not cond)`. Otherwise, we will create more
3705 // uses. NB: Trying to freely invert cond doesn't make sense here, as if
3706 // cond was freely invertible, the select arms would have been inverted.
3707 if (Sel->hasOneUse() &&
3708 (Pred == ICmpInst::ICMP_EQ
3709 ? (match(FV, m_Zero()) && isKnownNonZero(TV, Q))
3710 : (match(TV, m_Zero()) && isKnownNonZero(FV, Q)))) {
3711 Value *NotCond = Builder.CreateNot(Cond);
3712 Value *Cmp = Builder.CreateICmp(
3713 Pred, Other, Constant::getNullValue(Other->getType()));
3714 return BinaryOperator::Create(
3715 Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3716 NotCond);
3717 }
3718 }
3719 break;
3720 }
3721 case Instruction::UDiv:
3722 case Instruction::SDiv:
3723 if (BO->isExact()) {
3724 // div exact X, Y eq/ne 0 -> X eq/ne 0
3725 // div exact X, Y eq/ne 1 -> X eq/ne Y
3726 // div exact X, Y eq/ne C ->
3727 // if Y * C never-overflow && OneUse:
3728 // -> Y * C eq/ne X
3729 if (C.isZero())
3730 return new ICmpInst(Pred, BOp0, Constant::getNullValue(BO->getType()));
3731 else if (C.isOne())
3732 return new ICmpInst(Pred, BOp0, BOp1);
3733 else if (BO->hasOneUse()) {
3734 OverflowResult OR = computeOverflow(
3735 Instruction::Mul, BO->getOpcode() == Instruction::SDiv, BOp1,
3736 Cmp.getOperand(1), BO);
3737 if (OR == OverflowResult::NeverOverflows) {
3738 Value *YC =
3739 Builder.CreateMul(BOp1, ConstantInt::get(BO->getType(), C));
3740 return new ICmpInst(Pred, YC, BOp0);
3741 }
3742 }
3743 }
3744 if (BO->getOpcode() == Instruction::UDiv && C.isZero()) {
3745 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3746 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3747 return new ICmpInst(NewPred, BOp1, BOp0);
3748 }
3749 break;
3750 default:
3751 break;
3752 }
3753 return nullptr;
3754}
3755
3756static Instruction *foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs,
3757 const APInt &CRhs,
3758 InstCombiner::BuilderTy &Builder,
3759 const SimplifyQuery &Q) {
3760 assert(CtpopLhs->getIntrinsicID() == Intrinsic::ctpop &&
3761 "Non-ctpop intrin in ctpop fold");
3762 if (!CtpopLhs->hasOneUse())
3763 return nullptr;
3764
3765 // Power of 2 test:
3766 // isPow2OrZero : ctpop(X) u< 2
3767 // isPow2 : ctpop(X) == 1
3768 // NotPow2OrZero: ctpop(X) u> 1
3769 // NotPow2 : ctpop(X) != 1
3770 // If we know any bit of X can be folded to:
3771 // IsPow2 : X & (~Bit) == 0
3772 // NotPow2 : X & (~Bit) != 0
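 // e.g. (illustrative, i8): if bit 2 of X is known to be set,
 // ctpop(X) == 1 --> (X & ~4) == 0 and ctpop(X) != 1 --> (X & ~4) != 0.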
3773 const ICmpInst::Predicate Pred = I.getPredicate();
3774 if (((I.isEquality() || Pred == ICmpInst::ICMP_UGT) && CRhs == 1) ||
3775 (Pred == ICmpInst::ICMP_ULT && CRhs == 2)) {
3776 Value *Op = CtpopLhs->getArgOperand(0);
3777 KnownBits OpKnown = computeKnownBits(Op, Q.DL, Q.AC, Q.CxtI, Q.DT);
3778 // No need to check for count > 1, that should be already constant folded.
3779 if (OpKnown.countMinPopulation() == 1) {
3780 Value *And = Builder.CreateAnd(
3781 Op, Constant::getIntegerValue(Op->getType(), ~(OpKnown.One)));
3782 return new ICmpInst(
3783 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_ULT)
3784 ? ICmpInst::ICMP_EQ
3785 : ICmpInst::ICMP_NE,
3786 And, Constant::getNullValue(Op->getType()));
3787 }
3788 }
3789
3790 return nullptr;
3791}
3792
3793/// Fold an equality icmp with LLVM intrinsic and constant operand.
3794Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3795 ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3796 Type *Ty = II->getType();
3797 unsigned BitWidth = C.getBitWidth();
3798 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3799
3800 switch (II->getIntrinsicID()) {
3801 case Intrinsic::abs:
3802 // abs(A) == 0 -> A == 0
3803 // abs(A) == INT_MIN -> A == INT_MIN
3804 if (C.isZero() || C.isMinSignedValue())
3805 return new ICmpInst(Pred, II->getArgOperand(0), ConstantInt::get(Ty, C));
3806 break;
3807
3808 case Intrinsic::bswap:
3809 // bswap(A) == C -> A == bswap(C)
3810 return new ICmpInst(Pred, II->getArgOperand(0),
3811 ConstantInt::get(Ty, C.byteSwap()));
3812
3813 case Intrinsic::bitreverse:
3814 // bitreverse(A) == C -> A == bitreverse(C)
3815 return new ICmpInst(Pred, II->getArgOperand(0),
3816 ConstantInt::get(Ty, C.reverseBits()));
3817
3818 case Intrinsic::ctlz:
3819 case Intrinsic::cttz: {
3820 // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
3821 if (C == BitWidth)
3822 return new ICmpInst(Pred, II->getArgOperand(0),
3823 ConstantInt::getNullValue(Ty));
3824
3825 // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3826 // and Mask1 has bits 0..C+1 set. Similar for ctl, but for high bits.
3827 // Limit to one use to ensure we don't increase instruction count.
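 // e.g. (illustrative, i8): cttz(A) == 2 --> (A & 0b111) == 0b100, and
 // ctlz(A) == 2 --> (A & 0b11100000) == 0b00100000.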
3828 unsigned Num = C.getLimitedValue(BitWidth);
3829 if (Num != BitWidth && II->hasOneUse()) {
3830 bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3831 APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3832 : APInt::getHighBitsSet(BitWidth, Num + 1);
3833 APInt Mask2 = IsTrailing
3834 ? APInt::getOneBitSet(BitWidth, Num)
3835 : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3836 return new ICmpInst(Pred, Builder.CreateAnd(II->getArgOperand(0), Mask1),
3837 ConstantInt::get(Ty, Mask2));
3838 }
3839 break;
3840 }
3841
3842 case Intrinsic::ctpop: {
3843 // popcount(A) == 0 -> A == 0 and likewise for !=
3844 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
3845 bool IsZero = C.isZero();
3846 if (IsZero || C == BitWidth)
3847 return new ICmpInst(Pred, II->getArgOperand(0),
3848 IsZero ? Constant::getNullValue(Ty)
3849 : Constant::getAllOnesValue(Ty));
3850
3851 break;
3852 }
3853
3854 case Intrinsic::fshl:
3855 case Intrinsic::fshr:
3856 if (II->getArgOperand(0) == II->getArgOperand(1)) {
3857 const APInt *RotAmtC;
3858 // ror(X, RotAmtC) == C --> X == rol(C, RotAmtC)
3859 // rol(X, RotAmtC) == C --> X == ror(C, RotAmtC)
3860 if (match(II->getArgOperand(2), m_APInt(RotAmtC)))
3861 return new ICmpInst(Pred, II->getArgOperand(0),
3862 II->getIntrinsicID() == Intrinsic::fshl
3863 ? ConstantInt::get(Ty, C.rotr(*RotAmtC))
3864 : ConstantInt::get(Ty, C.rotl(*RotAmtC)));
3865 }
3866 break;
3867
3868 case Intrinsic::umax:
3869 case Intrinsic::uadd_sat: {
3870 // uadd.sat(a, b) == 0 -> (a | b) == 0
3871 // umax(a, b) == 0 -> (a | b) == 0
3872 if (C.isZero() && II->hasOneUse()) {
3873 Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3874 return new ICmpInst(Pred, Or, Constant::getNullValue(Ty));
3875 }
3876 break;
3877 }
3878
3879 case Intrinsic::ssub_sat:
3880 // ssub.sat(a, b) == 0 -> a == b
3881 if (C.isZero())
3882 return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
3883 break;
3884 case Intrinsic::usub_sat: {
3885 // usub.sat(a, b) == 0 -> a <= b
3886 if (C.isZero()) {
3887 ICmpInst::Predicate NewPred =
3888 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3889 return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3890 }
3891 break;
3892 }
3893 default:
3894 break;
3895 }
3896
3897 return nullptr;
3898}
3899
3900/// Fold an icmp with LLVM intrinsics
3901static Instruction *
3902foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp,
3903 InstCombiner::BuilderTy &Builder) {
3904 assert(Cmp.isEquality());
3905
3906 ICmpInst::Predicate Pred = Cmp.getPredicate();
3907 Value *Op0 = Cmp.getOperand(0);
3908 Value *Op1 = Cmp.getOperand(1);
3909 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3910 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3911 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3912 return nullptr;
3913
3914 switch (IIOp0->getIntrinsicID()) {
3915 case Intrinsic::bswap:
3916 case Intrinsic::bitreverse:
3917 // If both operands are byte-swapped or bit-reversed, just compare the
3918 // original values.
3919 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3920 case Intrinsic::fshl:
3921 case Intrinsic::fshr: {
3922 // If both operands are rotated by same amount, just compare the
3923 // original values.
3924 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3925 break;
3926 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3927 break;
3928 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3929 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3930
3931 // rotate(X, AmtX) == rotate(Y, AmtY)
3932 // -> rotate(X, AmtX - AmtY) == Y
3933 // Do this if either both rotates have one use or if only one has one use
3934 // and AmtX/AmtY are constants.
3935 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3936 if (OneUses == 2 ||
3937 (OneUses == 1 && match(IIOp0->getOperand(2), m_ImmConstant()) &&
3938 match(IIOp1->getOperand(2), m_ImmConstant()))) {
3939 Value *SubAmt =
3940 Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3941 Value *CombinedRotate = Builder.CreateIntrinsic(
3942 Op0->getType(), IIOp0->getIntrinsicID(),
3943 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3944 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3945 }
3946 } break;
3947 default:
3948 break;
3949 }
3950
3951 return nullptr;
3952}
3953
3954/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3955/// where X is some kind of instruction and C is AllowPoison.
3956/// TODO: Move more folds which allow poison to this function.
3957Instruction *
3958InstCombinerImpl::foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
3959 const APInt &C) {
3960 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3961 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3962 switch (II->getIntrinsicID()) {
3963 default:
3964 break;
3965 case Intrinsic::fshl:
3966 case Intrinsic::fshr:
3967 if (Cmp.isEquality() && II->getArgOperand(0) == II->getArgOperand(1)) {
3968 // (rot X, ?) == 0/-1 --> X == 0/-1
3969 if (C.isZero() || C.isAllOnes())
3970 return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1));
3971 }
3972 break;
3973 }
3974 }
3975
3976 return nullptr;
3977}
3978
3979/// Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
3980Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp,
3981 BinaryOperator *BO,
3982 const APInt &C) {
3983 switch (BO->getOpcode()) {
3984 case Instruction::Xor:
3985 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
3986 return I;
3987 break;
3988 case Instruction::And:
3989 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
3990 return I;
3991 break;
3992 case Instruction::Or:
3993 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
3994 return I;
3995 break;
3996 case Instruction::Mul:
3997 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
3998 return I;
3999 break;
4000 case Instruction::Shl:
4001 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
4002 return I;
4003 break;
4004 case Instruction::LShr:
4005 case Instruction::AShr:
4006 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
4007 return I;
4008 break;
4009 case Instruction::SRem:
4010 if (Instruction *I = foldICmpSRemConstant(Cmp, BO, C))
4011 return I;
4012 break;
4013 case Instruction::UDiv:
4014 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
4015 return I;
4016 [[fallthrough]];
4017 case Instruction::SDiv:
4018 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
4019 return I;
4020 break;
4021 case Instruction::Sub:
4022 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
4023 return I;
4024 break;
4025 case Instruction::Add:
4026 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
4027 return I;
4028 break;
4029 default:
4030 break;
4031 }
4032
4033 // TODO: These folds could be refactored to be part of the above calls.
4034 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
4035 return I;
4036
4037 // Fall back to handling `icmp pred (select A ? C1 : C2) binop (select B ? C3
4038 // : C4), C5` pattern, by computing a truth table of the four constant
4039 // variants.
4040 return foldICmpBinOpWithConstantViaTruthTable(Cmp, BO, C);
4041}
4042
4043static Instruction *
4044foldICmpUSubSatOrUAddSatWithConstant(ICmpInst::Predicate Pred, SaturatingInst *II,
4045 const APInt &C,
4046 InstCombiner::BuilderTy &Builder) {
4047 // This transform may end up producing more than one instruction for the
4048 // intrinsic, so limit it to one user of the intrinsic.
4049 if (!II->hasOneUse())
4050 return nullptr;
4051
4052 // Let Y = [add/sub]_sat(X, C) pred C2
4053 // SatVal = The saturating value for the operation
4054 // WillWrap = Whether or not the operation will underflow / overflow
4055 // => Y = (WillWrap ? SatVal : (X binop C)) pred C2
4056 // => Y = WillWrap ? (SatVal pred C2) : ((X binop C) pred C2)
4057 //
4058 // When (SatVal pred C2) is true, then
4059 // Y = WillWrap ? true : ((X binop C) pred C2)
4060 // => Y = WillWrap || ((X binop C) pred C2)
4061 // else
4062 // Y = WillWrap ? false : ((X binop C) pred C2)
4063 // => Y = !WillWrap ? ((X binop C) pred C2) : false
4064 // => Y = !WillWrap && ((X binop C) pred C2)
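 // e.g. (illustrative, i8): uadd.sat(X, 100) u< 150: SatVal = 255 fails u< 150,
 // so this is !WillWrap && (X + 100 u< 150), which reduces to X u< 50.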
4065 Value *Op0 = II->getOperand(0);
4066 Value *Op1 = II->getOperand(1);
4067
4068 const APInt *COp1;
4069 // This transform only works when the intrinsic has an integral constant or
4070 // splat vector as the second operand.
4071 if (!match(Op1, m_APInt(COp1)))
4072 return nullptr;
4073
4074 APInt SatVal;
4075 switch (II->getIntrinsicID()) {
4076 default:
4077 llvm_unreachable(
4078 "This function only works with usub_sat and uadd_sat for now!");
4079 case Intrinsic::uadd_sat:
4080 SatVal = APInt::getAllOnes(C.getBitWidth());
4081 break;
4082 case Intrinsic::usub_sat:
4083 SatVal = APInt::getZero(C.getBitWidth());
4084 break;
4085 }
4086
4087 // Check (SatVal pred C2)
4088 bool SatValCheck = ICmpInst::compare(SatVal, C, Pred);
4089
4090 // !WillWrap.
4091 ConstantRange C1 = ConstantRange::makeExactNoWrapRegion(
4092 II->getBinaryOp(), *COp1, II->getNoWrapKind());
4093
4094 // WillWrap.
4095 if (SatValCheck)
4096 C1 = C1.inverse();
4097
4098 ConstantRange C2 = ConstantRange::makeExactICmpRegion(Pred, C);
4099 if (II->getBinaryOp() == Instruction::Add)
4100 C2 = C2.sub(*COp1);
4101 else
4102 C2 = C2.add(*COp1);
4103
4104 Instruction::BinaryOps CombiningOp =
4105 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4106
4107 std::optional<ConstantRange> Combination;
4108 if (CombiningOp == Instruction::BinaryOps::Or)
4109 Combination = C1.exactUnionWith(C2);
4110 else /* CombiningOp == Instruction::BinaryOps::And */
4111 Combination = C1.exactIntersectWith(C2);
4112
4113 if (!Combination)
4114 return nullptr;
4115
4116 CmpInst::Predicate EquivPred;
4117 APInt EquivInt;
4118 APInt EquivOffset;
4119
4120 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4121
4122 return new ICmpInst(
4123 EquivPred,
4124 Builder.CreateAdd(Op0, ConstantInt::get(Op1->getType(), EquivOffset)),
4125 ConstantInt::get(Op1->getType(), EquivInt));
4126}
4127
4128static Instruction *
4129foldICmpOfCmpIntrinsicWithConstant(ICmpInst::Predicate Pred, IntrinsicInst *I,
4130 const APInt &C,
4131 InstCombiner::BuilderTy &Builder) {
4132 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4133 switch (Pred) {
4134 case ICmpInst::ICMP_EQ:
4135 case ICmpInst::ICMP_NE:
4136 if (C.isZero())
4137 NewPredicate = Pred;
4138 else if (C.isOne())
4139 NewPredicate =
4140 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
4141 else if (C.isAllOnes())
4142 NewPredicate =
4143 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
4144 break;
4145
4146 case ICmpInst::ICMP_SGT:
4147 if (C.isAllOnes())
4148 NewPredicate = ICmpInst::ICMP_UGE;
4149 else if (C.isZero())
4150 NewPredicate = ICmpInst::ICMP_UGT;
4151 break;
4152
4153 case ICmpInst::ICMP_SLT:
4154 if (C.isZero())
4155 NewPredicate = ICmpInst::ICMP_ULT;
4156 else if (C.isOne())
4157 NewPredicate = ICmpInst::ICMP_ULE;
4158 break;
4159
4160 case ICmpInst::ICMP_ULT:
4161 if (C.ugt(1))
4162 NewPredicate = ICmpInst::ICMP_UGE;
4163 break;
4164
4165 case ICmpInst::ICMP_UGT:
4166 if (!C.isZero() && !C.isAllOnes())
4167 NewPredicate = ICmpInst::ICMP_ULT;
4168 break;
4169
4170 default:
4171 break;
4172 }
4173
4174 if (!NewPredicate)
4175 return nullptr;
4176
4177 if (I->getIntrinsicID() == Intrinsic::scmp)
4178 NewPredicate = ICmpInst::getSignedPredicate(*NewPredicate);
4179 Value *LHS = I->getOperand(0);
4180 Value *RHS = I->getOperand(1);
4181 return new ICmpInst(*NewPredicate, LHS, RHS);
4182}
4183
4184/// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
4185Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
4186 IntrinsicInst *II,
4187 const APInt &C) {
4188 ICmpInst::Predicate Pred = Cmp.getPredicate();
4189
4190 // Handle folds that apply for any kind of icmp.
4191 switch (II->getIntrinsicID()) {
4192 default:
4193 break;
4194 case Intrinsic::uadd_sat:
4195 case Intrinsic::usub_sat:
4196 if (auto *Folded = foldICmpUSubSatOrUAddSatWithConstant(
4197 Pred, cast<SaturatingInst>(II), C, Builder))
4198 return Folded;
4199 break;
4200 case Intrinsic::ctpop: {
4201 const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
4202 if (Instruction *R = foldCtpopPow2Test(Cmp, II, C, Builder, Q))
4203 return R;
4204 } break;
4205 case Intrinsic::scmp:
4206 case Intrinsic::ucmp:
4207 if (auto *Folded = foldICmpOfCmpIntrinsicWithConstant(Pred, II, C, Builder))
4208 return Folded;
4209 break;
4210 }
4211
4212 if (Cmp.isEquality())
4213 return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
4214
4215 Type *Ty = II->getType();
4216 unsigned BitWidth = C.getBitWidth();
4217 switch (II->getIntrinsicID()) {
4218 case Intrinsic::ctpop: {
4219 // (ctpop X > BitWidth - 1) --> X == -1
4220 Value *X = II->getArgOperand(0);
4221 if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
4222 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
4223 ConstantInt::getAllOnesValue(Ty));
4224 // (ctpop X < BitWidth) --> X != -1
4225 if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
4226 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
4227 ConstantInt::getAllOnesValue(Ty));
4228 break;
4229 }
4230 case Intrinsic::ctlz: {
4231 // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
4232 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4233 unsigned Num = C.getLimitedValue();
4234 APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
4235 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
4236 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4237 }
4238
4239 // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
4240 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4241 unsigned Num = C.getLimitedValue();
4242 APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
4243 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
4244 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4245 }
4246 break;
4247 }
4248 case Intrinsic::cttz: {
4249 // Limit to one use to ensure we don't increase instruction count.
4250 if (!II->hasOneUse())
4251 return nullptr;
4252
4253 // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
4254 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4255 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
4256 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
4257 Builder.CreateAnd(II->getArgOperand(0), Mask),
4258 ConstantInt::getNullValue(Ty));
4259 }
4260
4261 // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
4262 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4263 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
4264 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
4265 Builder.CreateAnd(II->getArgOperand(0), Mask),
4266 ConstantInt::getNullValue(Ty));
4267 }
4268 break;
4269 }
4270 case Intrinsic::ssub_sat:
4271 // ssub.sat(a, b) spred 0 -> a spred b
4272 if (ICmpInst::isSigned(Pred)) {
4273 if (C.isZero())
4274 return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
4275 // X s<= 0 is canonicalized to X s< 1
4276 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
4277 return new ICmpInst(ICmpInst::ICMP_SLE, II->getArgOperand(0),
4278 II->getArgOperand(1));
4279 // X s>= 0 is canonicalized to X s> -1
4280 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
4281 return new ICmpInst(ICmpInst::ICMP_SGE, II->getArgOperand(0),
4282 II->getArgOperand(1));
4283 }
4284 break;
4285 default:
4286 break;
4287 }
4288
4289 return nullptr;
4290}
4291
4292/// Handle icmp with constant (but not simple integer constant) RHS.
4293Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
4294 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4295 Constant *RHSC = dyn_cast<Constant>(Op1);
4296 Instruction *LHSI = dyn_cast<Instruction>(Op0);
4297 if (!RHSC || !LHSI)
4298 return nullptr;
4299
4300 switch (LHSI->getOpcode()) {
4301 case Instruction::IntToPtr:
4302 // icmp pred inttoptr(X), null -> icmp pred X, 0
4303 if (RHSC->isNullValue() &&
4304 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
4305 return new ICmpInst(
4306 I.getPredicate(), LHSI->getOperand(0),
4307 Constant::getNullValue(LHSI->getOperand(0)->getType()));
4308 break;
4309
4310 case Instruction::Load:
4311 // Try to optimize things like "A[i] > 4" to index computations.
4312 if (GetElementPtrInst *GEP =
4313 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
4314 if (Instruction *Res =
4315 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, I))
4316 return Res;
4317 break;
4318 }
4319
4320 return nullptr;
4321}
4322
4323Instruction *InstCombinerImpl::foldSelectICmp(CmpPredicate Pred, SelectInst *SI,
4324 Value *RHS, const ICmpInst &I) {
4325 // Try to fold the comparison into the select arms, which will cause the
4326 // select to be converted into a logical and/or.
4327 auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
4328 if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
4329 return Res;
4330 if (std::optional<bool> Impl = isImpliedCondition(
4331 SI->getCondition(), Pred, Op, RHS, DL, SelectCondIsTrue))
4332 return ConstantInt::get(I.getType(), *Impl);
4333 return nullptr;
4334 };
4335
4336 ConstantInt *CI = nullptr;
4337 Value *Op1 = SimplifyOp(SI->getOperand(1), true);
4338 if (Op1)
4339 CI = dyn_cast<ConstantInt>(Op1);
4340
4341 Value *Op2 = SimplifyOp(SI->getOperand(2), false);
4342 if (Op2)
4343 CI = dyn_cast<ConstantInt>(Op2);
4344
4345 auto Simplifies = [&](Value *Op, unsigned Idx) {
4346 // A comparison of ucmp/scmp with a constant will fold into an icmp.
4347 const APInt *Dummy;
4348 return Op ||
4349 (isa<CmpIntrinsic>(SI->getOperand(Idx)) &&
4350 SI->getOperand(Idx)->hasOneUse() && match(RHS, m_APInt(Dummy)));
4351 };
4352
4353 // We only want to perform this transformation if it will not lead to
4354 // additional code. This is true if either both sides of the select
4355 // fold to a constant (in which case the icmp is replaced with a select
4356 // which will usually simplify) or this is the only user of the
4357 // select (in which case we are trading a select+icmp for a simpler
4358 // select+icmp) or all uses of the select can be replaced based on
4359 // dominance information ("Global cases").
4360 bool Transform = false;
4361 if (Op1 && Op2)
4362 Transform = true;
4363 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4364 // Local case
4365 if (SI->hasOneUse())
4366 Transform = true;
4367 // Global cases
4368 else if (CI && !CI->isZero())
4369 // When Op1 is constant try replacing select with second operand.
4370 // Otherwise Op2 is constant and try replacing select with first
4371 // operand.
4372 Transform = replacedSelectWithOperand(SI, &I, Op1 ? 2 : 1);
4373 }
4374 if (Transform) {
4375 if (!Op1)
4376 Op1 = Builder.CreateICmp(Pred, SI->getOperand(1), RHS, I.getName());
4377 if (!Op2)
4378 Op2 = Builder.CreateICmp(Pred, SI->getOperand(2), RHS, I.getName());
4379 return SelectInst::Create(SI->getOperand(0), Op1, Op2);
4380 }
4381
4382 return nullptr;
4383}
4384
4385// Returns whether V is a Mask ((X + 1) & X == 0) or ~Mask (-Pow2OrZero)
4386static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q,
4387 unsigned Depth = 0) {
4388 if (Not ? match(V, m_NegatedPower2OrZero()) : match(V, m_LowBitMaskOrZero()))
4389 return true;
4390 if (V->getType()->getScalarSizeInBits() == 1)
4391 return true;
4392 if (Depth++ >= MaxAnalysisRecursionDepth)
4393 return false;
4394 Value *X;
4395 const Operator *I = dyn_cast<Operator>(V);
4396 if (!I)
4397 return false;
4398 switch (I->getOpcode()) {
4399 case Instruction::ZExt:
4400 // ZExt(Mask) is a Mask.
4401 return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4402 case Instruction::SExt:
4403 // SExt(Mask) is a Mask.
4404 // SExt(~Mask) is a ~Mask.
4405 return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4406 case Instruction::And:
4407 case Instruction::Or:
4408 // Mask0 | Mask1 is a Mask.
4409 // Mask0 & Mask1 is a Mask.
4410 // ~Mask0 | ~Mask1 is a ~Mask.
4411 // ~Mask0 & ~Mask1 is a ~Mask.
4412 return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4413 isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4414 case Instruction::Xor:
4415 if (match(V, m_Not(m_Value(X))))
4416 return isMaskOrZero(X, !Not, Q, Depth);
4417
4418 // (X ^ -X) is a ~Mask
4419 if (Not)
4420 return match(V, m_c_Xor(m_Value(X), m_Neg(m_Deferred(X))));
4421 // (X ^ (X - 1)) is a Mask
4422 else
4423 return match(V, m_c_Xor(m_Value(X), m_Add(m_Deferred(X), m_AllOnes())));
4424 case Instruction::Select:
4425 // c ? Mask0 : Mask1 is a Mask.
4426 return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4427 isMaskOrZero(I->getOperand(2), Not, Q, Depth);
4428 case Instruction::Shl:
4429 // (~Mask) << X is a ~Mask.
4430 return Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4431 case Instruction::LShr:
4432 // Mask >> X is a Mask.
4433 return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4434 case Instruction::AShr:
4435 // Mask s>> X is a Mask.
4436 // ~Mask s>> X is a ~Mask.
4437 return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4438 case Instruction::Add:
4439 // Pow2 - 1 is a Mask.
4440 if (!Not && match(I->getOperand(1), m_AllOnes()))
4441 return isKnownToBeAPowerOfTwo(I->getOperand(0), Q.DL, /*OrZero*/ true,
4442 Q.AC, Q.CxtI, Q.DT, Depth);
4443 break;
4444 case Instruction::Sub:
4445 // -Pow2 is a ~Mask.
4446 if (Not && match(I->getOperand(0), m_Zero()))
4447 return isKnownToBeAPowerOfTwo(I->getOperand(1), Q.DL, /*OrZero*/ true,
4448 Q.AC, Q.CxtI, Q.DT, Depth);
4449 break;
4450 case Instruction::Call: {
4451 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
4452 switch (II->getIntrinsicID()) {
4453 // min/max(Mask0, Mask1) is a Mask.
4454 // min/max(~Mask0, ~Mask1) is a ~Mask.
4455 case Intrinsic::umax:
4456 case Intrinsic::smax:
4457 case Intrinsic::umin:
4458 case Intrinsic::smin:
4459 return isMaskOrZero(II->getArgOperand(1), Not, Q, Depth) &&
4460 isMaskOrZero(II->getArgOperand(0), Not, Q, Depth);
4461
4462 // In the context of masks, bitreverse(Mask) == ~Mask
4463 case Intrinsic::bitreverse:
4464 return isMaskOrZero(II->getArgOperand(0), !Not, Q, Depth);
4465 default:
4466 break;
4467 }
4468 }
4469 break;
4470 }
4471 default:
4472 break;
4473 }
4474 return false;
4475}
4476
4477/// Some comparisons can be simplified.
4478/// In this case, we are looking for comparisons that look like
4479/// a check for a lossy truncation.
4480/// Folds:
4481/// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask
4482/// icmp SrcPred (x & ~Mask), ~Mask to icmp DstPred x, ~Mask
4483/// icmp eq/ne (x & ~Mask), 0 to icmp DstPred x, Mask
4484/// icmp eq/ne (~x | Mask), -1 to icmp DstPred x, Mask
4485/// Where Mask is some pattern that produces all-ones in low bits:
4486/// (-1 >> y)
4487/// ((-1 << y) >> y) <- non-canonical, has extra uses
4488/// ~(-1 << y)
4489/// ((1 << y) + (-1)) <- non-canonical, has extra uses
4490/// The Mask can be a constant, too.
4491/// For some predicates, the operands are commutative.
4492/// For others, x can only be on a specific side.
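/// e.g. (illustrative): icmp eq (and %x, 15), %x becomes icmp ule %x, 15.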
4493static Value *foldICmpWithLowBitMaskedVal(ICmpInst::Predicate Pred, Value *Op0,
4494 Value *Op1, const SimplifyQuery &Q,
4495 InstCombiner &IC) {
4496
4497 ICmpInst::Predicate DstPred;
4498 switch (Pred) {
4499 case ICmpInst::Predicate::ICMP_EQ:
4500 // x & Mask == x
4501 // x & ~Mask == 0
4502 // ~x | Mask == -1
4503 // -> x u<= Mask
4504 // x & ~Mask == ~Mask
4505 // -> ~Mask u<= x
4506 DstPred = ICmpInst::Predicate::ICMP_ULE;
4507 break;
4508 case ICmpInst::Predicate::ICMP_NE:
4509 // x & Mask != x
4510 // x & ~Mask != 0
4511 // ~x | Mask != -1
4512 // -> x u> Mask
4513 // x & ~Mask != ~Mask
4514 // -> ~Mask u> x
4515 DstPred = ICmpInst::Predicate::ICMP_UGT;
4516 break;
4517 case ICmpInst::Predicate::ICMP_ULT:
4518 // x & Mask u< x
4519 // -> x u> Mask
4520 // x & ~Mask u< ~Mask
4521 // -> ~Mask u> x
4522 DstPred = ICmpInst::Predicate::ICMP_UGT;
4523 break;
4524 case ICmpInst::Predicate::ICMP_UGE:
4525 // x & Mask u>= x
4526 // -> x u<= Mask
4527 // x & ~Mask u>= ~Mask
4528 // -> ~Mask u<= x
4529 DstPred = ICmpInst::Predicate::ICMP_ULE;
4530 break;
4531 case ICmpInst::Predicate::ICMP_SLT:
4532 // x & Mask s< x [iff Mask s>= 0]
4533 // -> x s> Mask
4534 // x & ~Mask s< ~Mask [iff ~Mask != 0]
4535 // -> ~Mask s> x
4536 DstPred = ICmpInst::Predicate::ICMP_SGT;
4537 break;
4538 case ICmpInst::Predicate::ICMP_SGE:
4539 // x & Mask s>= x [iff Mask s>= 0]
4540 // -> x s<= Mask
4541 // x & ~Mask s>= ~Mask [iff ~Mask != 0]
4542 // -> ~Mask s<= x
4543 DstPred = ICmpInst::Predicate::ICMP_SLE;
4544 break;
4545 default:
4546 // We don't support sgt,sle
4547 // ult/ugt are simplified to true/false respectively.
4548 return nullptr;
4549 }
4550
4551 Value *X, *M;
4552 // Put search code in lambda for early positive returns.
4553 auto IsLowBitMask = [&]() {
4554 if (match(Op0, m_c_And(m_Specific(Op1), m_Value(M)))) {
4555 X = Op1;
4556 // Look for: x & Mask pred x
4557 if (isMaskOrZero(M, /*Not=*/false, Q)) {
4558 return !ICmpInst::isSigned(Pred) ||
4559 (match(M, m_NonNegative()) || isKnownNonNegative(M, Q));
4560 }
4561
4562 // Look for: x & ~Mask pred ~Mask
4563 if (isMaskOrZero(X, /*Not=*/true, Q)) {
4564 return !ICmpInst::isSigned(Pred) || isKnownNonZero(X, Q);
4565 }
4566 return false;
4567 }
4568 if (ICmpInst::isEquality(Pred) && match(Op1, m_AllOnes()) &&
4569 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(M))))) {
4570
4571 auto Check = [&]() {
4572 // Look for: ~x | Mask == -1
4573 if (isMaskOrZero(M, /*Not=*/false, Q)) {
4574 if (Value *NotX =
4575 IC.getFreelyInverted(X, X->hasOneUse(), &IC.Builder)) {
4576 X = NotX;
4577 return true;
4578 }
4579 }
4580 return false;
4581 };
4582 if (Check())
4583 return true;
4584 std::swap(X, M);
4585 return Check();
4586 }
4587 if (ICmpInst::isEquality(Pred) && match(Op1, m_Zero()) &&
4588 match(Op0, m_OneUse(m_And(m_Value(X), m_Value(M))))) {
4589 auto Check = [&]() {
4590 // Look for: x & ~Mask == 0
4591 if (isMaskOrZero(M, /*Not=*/true, Q)) {
4592 if (Value *NotM =
4593 IC.getFreelyInverted(M, M->hasOneUse(), &IC.Builder)) {
4594 M = NotM;
4595 return true;
4596 }
4597 }
4598 return false;
4599 };
4600 if (Check())
4601 return true;
4602 std::swap(X, M);
4603 return Check();
4604 }
4605 return false;
4606 };
4607
4608 if (!IsLowBitMask())
4609 return nullptr;
4610
4611 return IC.Builder.CreateICmp(DstPred, X, M);
4612}
4613
4614/// Some comparisons can be simplified.
4615/// In this case, we are looking for comparisons that look like
4616/// a check for a lossy signed truncation.
4617/// Folds: (MaskedBits is a constant.)
4618/// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
4619/// Into:
4620/// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
4621/// Where KeptBits = bitwidth(%x) - MaskedBits
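/// e.g. (illustrative, i8, MaskedBits = 4, KeptBits = 4):
/// ((%x << 4) a>> 4) == %x becomes (add %x, 8) u< 16, i.e. %x is in [-8, 7].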
4622static Value *
4623foldSignedTruncationCheck(ICmpInst &I,
4624 InstCombiner::BuilderTy &Builder) {
4625 CmpPredicate SrcPred;
4626 Value *X;
4627 const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
4628 // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
4629 if (!match(&I, m_c_ICmp(SrcPred,
4630 m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
4631 m_APInt(C1))),
4632 m_Deferred(X))))
4633 return nullptr;
4634
4635 // Potential handling of non-splats: for each element:
4636 // * if both are undef, replace with constant 0.
4637 // Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
4638 // * if both are not undef, and are different, bail out.
4639 // * else, only one is undef, then pick the non-undef one.
4640
4641 // The shift amount must be equal.
4642 if (*C0 != *C1)
4643 return nullptr;
4644 const APInt &MaskedBits = *C0;
4645 assert(MaskedBits != 0 && "shift by zero should be folded away already.");
4646
4647 ICmpInst::Predicate DstPred;
4648 switch (SrcPred) {
4649 case ICmpInst::Predicate::ICMP_EQ:
4650 // ((%x << MaskedBits) a>> MaskedBits) == %x
4651 // =>
4652 // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
4653 DstPred = ICmpInst::Predicate::ICMP_ULT;
4654 break;
4655 case ICmpInst::Predicate::ICMP_NE:
4656 // ((%x << MaskedBits) a>> MaskedBits) != %x
4657 // =>
4658 // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
4659 DstPred = ICmpInst::Predicate::ICMP_UGE;
4660 break;
4661 // FIXME: are more folds possible?
4662 default:
4663 return nullptr;
4664 }
4665
4666 auto *XType = X->getType();
4667 const unsigned XBitWidth = XType->getScalarSizeInBits();
4668 const APInt BitWidth = APInt(XBitWidth, XBitWidth);
4669 assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
4670
4671 // KeptBits = bitwidth(%x) - MaskedBits
4672 const APInt KeptBits = BitWidth - MaskedBits;
4673 assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
4674 // ICmpCst = (1 << KeptBits)
4675 const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
4676 assert(ICmpCst.isPowerOf2());
4677 // AddCst = (1 << (KeptBits-1))
4678 const APInt AddCst = ICmpCst.lshr(1);
4679 assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
4680
4681 // T0 = add %x, AddCst
4682 Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
4683 // T1 = T0 DstPred ICmpCst
4684 Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
4685
4686 return T1;
4687}
4688
4689// Given pattern:
4690// icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4691// we should move shifts to the same hand of 'and', i.e. rewrite as
4692// icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
4693// We are only interested in opposite logical shifts here.
4694// One of the shifts can be truncated.
4695// If we can, we want to end up creating 'lshr' shift.
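// Illustrative example (added annotation, not from the upstream source):
// for i32 values,
//   ((x << 1) & (y u>> 2)) != 0
// is rewritten, since (1 + 2) u< 32, into
//   ((x << 3) & y) != 0
// which tests the same pairs of bits of x and y.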
4696 static Value *
4697 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
4698 InstCombiner::BuilderTy &Builder) {
4699 if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
4700 !I.getOperand(0)->hasOneUse())
4701 return nullptr;
4702
4703 auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
4704
4705 // Look for an 'and' of two logical shifts, one of which may be truncated.
4706 // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
4707 Instruction *XShift, *MaybeTruncation, *YShift;
4708 if (!match(
4709 I.getOperand(0),
4710 m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
4711 m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
4712 m_AnyLogicalShift, m_Instruction(YShift))),
4713 m_Instruction(MaybeTruncation)))))
4714 return nullptr;
4715
4716 // We potentially looked past 'trunc', but only when matching YShift,
4717 // therefore YShift must have the widest type.
4718 Instruction *WidestShift = YShift;
4719 // Therefore XShift must have the shallowest type.
4720 // Or they both have identical types if there was no truncation.
4721 Instruction *NarrowestShift = XShift;
4722
4723 Type *WidestTy = WidestShift->getType();
4724 Type *NarrowestTy = NarrowestShift->getType();
4725 assert(NarrowestTy == I.getOperand(0)->getType() &&
4726 "We did not look past any shifts while matching XShift though.");
4727 bool HadTrunc = WidestTy != I.getOperand(0)->getType();
4728
4729 // If YShift is a 'lshr', swap the shifts around.
4730 if (match(YShift, m_LShr(m_Value(), m_Value())))
4731 std::swap(XShift, YShift);
4732
4733 // The shifts must be in opposite directions.
4734 auto XShiftOpcode = XShift->getOpcode();
4735 if (XShiftOpcode == YShift->getOpcode())
4736 return nullptr; // Do not care about same-direction shifts here.
4737
4738 Value *X, *XShAmt, *Y, *YShAmt;
4739 match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
4740 match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
4741
4742 // If one of the values being shifted is a constant, then we will end with
4743 // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
4744 // however, we will need to ensure that we won't increase instruction count.
4745 if (!isa<Constant>(X) && !isa<Constant>(Y)) {
4746 // At least one of the hands of the 'and' should be one-use shift.
4747 if (!match(I.getOperand(0),
4748 m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
4749 return nullptr;
4750 if (HadTrunc) {
4751 // Due to the 'trunc', we will need to widen X. For that either the old
4752 // 'trunc' or the shift amt in the non-truncated shift should be one-use.
4753 if (!MaybeTruncation->hasOneUse() &&
4754 !NarrowestShift->getOperand(1)->hasOneUse())
4755 return nullptr;
4756 }
4757 }
4758
4759 // We have two shift amounts from two different shifts. The types of those
4760 // shift amounts may not match. If that's the case, let's bail out now.
4761 if (XShAmt->getType() != YShAmt->getType())
4762 return nullptr;
4763
4764 // As input, we have the following pattern:
4765 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4766 // We want to rewrite that as:
4767 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
4768 // While we know that originally (Q+K) would not overflow
4769 // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
4770 // shift amounts, so it may now overflow in the smaller bit width.
4771 // To ensure that does not happen, we need to ensure that the total maximal
4772 // shift amount is still representable in that smaller bit width.
4773 unsigned MaximalPossibleTotalShiftAmount =
4774 (WidestTy->getScalarSizeInBits() - 1) +
4775 (NarrowestTy->getScalarSizeInBits() - 1);
4776 APInt MaximalRepresentableShiftAmount =
4777 APInt::getAllOnes(XShAmt->getType()->getScalarSizeInBits());
4778 if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
4779 return nullptr;
4780
4781 // Can we fold (XShAmt+YShAmt) ?
4782 auto *NewShAmt = dyn_cast_or_null<Constant>(
4783 simplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
4784 /*isNUW=*/false, SQ.getWithInstruction(&I)));
4785 if (!NewShAmt)
4786 return nullptr;
4787 if (NewShAmt->getType() != WidestTy) {
4788 NewShAmt =
4789 ConstantFoldCastOperand(Instruction::ZExt, NewShAmt, WidestTy, SQ.DL);
4790 if (!NewShAmt)
4791 return nullptr;
4792 }
4793 unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
4794
4795 // Is the new shift amount smaller than the bit width?
4796 // FIXME: could also rely on ConstantRange.
4797 if (!match(NewShAmt,
4798 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
4799 APInt(WidestBitWidth, WidestBitWidth))))
4800 return nullptr;
4801
4802 // An extra legality check is needed if we had trunc-of-lshr.
4803 if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
4804 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4805 WidestShift]() {
4806 // It isn't obvious whether it's worth it to analyze non-constants here.
4807 // Also, let's basically give up on non-splat cases, pessimizing vectors.
4808 // If *any* of these preconditions matches we can perform the fold.
4809 Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
4810 ? NewShAmt->getSplatValue()
4811 : NewShAmt;
4812 // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
4813 if (NewShAmtSplat &&
4814 (NewShAmtSplat->isNullValue() ||
4815 NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
4816 return true;
4817 // We consider *min* leading zeros so a single outlier
4818 // blocks the transform as opposed to allowing it.
4819 if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
4820 KnownBits Known = computeKnownBits(C, SQ.DL);
4821 unsigned MinLeadZero = Known.countMinLeadingZeros();
4822 // If the value being shifted has at most the lowest bit set, we can fold.
4823 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4824 if (MaxActiveBits <= 1)
4825 return true;
4826 // Precondition: NewShAmt u<= countLeadingZeros(C)
4827 if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
4828 return true;
4829 }
4830 if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
4831 KnownBits Known = computeKnownBits(C, SQ.DL);
4832 unsigned MinLeadZero = Known.countMinLeadingZeros();
4833 // If the value being shifted has at most the lowest bit set, we can fold.
4834 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4835 if (MaxActiveBits <= 1)
4836 return true;
4837 // Precondition: ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
4838 if (NewShAmtSplat) {
4839 APInt AdjNewShAmt =
4840 (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
4841 if (AdjNewShAmt.ule(MinLeadZero))
4842 return true;
4843 }
4844 }
4845 return false; // Can't tell if it's ok.
4846 };
4847 if (!CanFold())
4848 return nullptr;
4849 }
4850
4851 // All good, we can do this fold.
4852 X = Builder.CreateZExt(X, WidestTy);
4853 Y = Builder.CreateZExt(Y, WidestTy);
4854 // The shift is the same that was for X.
4855 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4856 ? Builder.CreateLShr(X, NewShAmt)
4857 : Builder.CreateShl(X, NewShAmt);
4858 Value *T1 = Builder.CreateAnd(T0, Y);
4859 return Builder.CreateICmp(I.getPredicate(), T1,
4860 Constant::getNullValue(WidestTy));
4861}
4862
4863/// Fold
4864/// (-1 u/ x) u< y
4865/// ((x * y) ?/ x) != y
4866/// to
4867/// @llvm.?mul.with.overflow(x, y) plus extraction of overflow bit
4868/// Note that the comparison is commutative, while inverted (u>=, ==) predicate
4869/// will mean that we are looking for the opposite answer.
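/// For example (illustrative annotation, not part of the upstream source):
///   %d = udiv i32 -1, %x
///   %r = icmp ult i32 %d, %y
/// becomes
///   %m = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
///   %r = extractvalue { i32, i1 } %m, 1
/// because (-1 u/ x) u< y holds exactly when x * y overflows.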
4870 Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
4871 CmpPredicate Pred;
4872 Value *X, *Y;
4873 Instruction *Mul;
4874 Instruction *Div;
4875 bool NeedNegation;
4876 // Look for: (-1 u/ x) u</u>= y
4877 if (!I.isEquality() &&
4878 match(&I, m_c_ICmp(Pred,
4879 m_CombineAnd(m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
4880 m_Instruction(Div)),
4881 m_Value(Y)))) {
4882 Mul = nullptr;
4883
4884 // Are we checking that overflow does not happen, or does happen?
4885 switch (Pred) {
4886 case ICmpInst::Predicate::ICMP_ULT:
4887 NeedNegation = false;
4888 break; // OK
4889 case ICmpInst::Predicate::ICMP_UGE:
4890 NeedNegation = true;
4891 break; // OK
4892 default:
4893 return nullptr; // Wrong predicate.
4894 }
4895 } else // Look for: ((x * y) / x) !=/== y
4896 if (I.isEquality() &&
4897 match(&I, m_c_ICmp(Pred, m_Value(Y),
4898 m_CombineAnd(
4899 m_OneUse(m_IDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
4900 m_Value(X)),
4901 m_Instruction(Mul)),
4902 m_Deferred(X))),
4903 m_Instruction(Div))))) {
4904 NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
4905 } else
4906 return nullptr;
4907 
4908 BuilderTy::InsertPointGuard Guard(Builder);
4909 // If the pattern included (x * y), we'll want to insert new instructions
4910 // right before that original multiplication so that we can replace it.
4911 bool MulHadOtherUses = Mul && !Mul->hasOneUse();
4912 if (MulHadOtherUses)
4913 Builder.SetInsertPoint(Mul);
4914
4915 CallInst *Call = Builder.CreateIntrinsic(
4916 Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4917 : Intrinsic::smul_with_overflow,
4918 X->getType(), {X, Y}, /*FMFSource=*/nullptr, "mul");
4919
4920 // If the multiplication was used elsewhere, to ensure that we don't leave
4921 // "duplicate" instructions, replace uses of that original multiplication
4922 // with the multiplication result from the with.overflow intrinsic.
4923 if (MulHadOtherUses)
4924 replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "mul.val"));
4925
4926 Value *Res = Builder.CreateExtractValue(Call, 1, "mul.ov");
4927 if (NeedNegation) // This technically increases instruction count.
4928 Res = Builder.CreateNot(Res, "mul.not.ov");
4929
4930 // If we replaced the mul, erase it. Do this after all uses of Builder,
4931 // as the mul is used as insertion point.
4932 if (MulHadOtherUses)
4933 eraseInstFromFunction(*Mul);
4934 
4935 return Res;
4936}
4937 
4938 static Instruction *foldICmpXNegX(ICmpInst &I,
4939 InstCombiner::BuilderTy &Builder) {
4940 CmpPredicate Pred;
4941 Value *X;
4942 if (match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X)))) {
4943
4944 if (ICmpInst::isSigned(Pred))
4945 Pred = ICmpInst::getSwappedPredicate(Pred);
4946 else if (ICmpInst::isUnsigned(Pred))
4947 Pred = ICmpInst::getSignedPredicate(Pred);
4948 // else for equality-comparisons just keep the predicate.
4949
4950 return ICmpInst::Create(Instruction::ICmp, Pred, X,
4951 Constant::getNullValue(X->getType()), I.getName());
4952 }
4953
4954 // A value is not equal to its negation unless that value is 0 or
4955 // MinSignedValue, i.e.: a != -a --> (a & MaxSignedVal) != 0
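// For example (illustrative annotation, not from the upstream source):
// with i8 %a, "%n = sub i8 0, %a; %r = icmp ne i8 %n, %a" becomes
// "%r = icmp ne (and i8 %a, 127), 0", since a == -a only for 0 and -128.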
4956 if (match(&I, m_c_ICmp(Pred, m_OneUse(m_Neg(m_Value(X))), m_Deferred(X))) &&
4957 ICmpInst::isEquality(Pred)) {
4958 Type *Ty = X->getType();
4959 uint32_t BitWidth = Ty->getScalarSizeInBits();
4960 Constant *MaxSignedVal =
4961 ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
4962 Value *And = Builder.CreateAnd(X, MaxSignedVal);
4963 Constant *Zero = Constant::getNullValue(Ty);
4964 return CmpInst::Create(Instruction::ICmp, Pred, And, Zero);
4965 }
4966
4967 return nullptr;
4968}
4969
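// Illustrative note (added annotation, not from the upstream source):
// (X & Y) u<= X holds for any X and Y, so the strict/non-strict unsigned
// comparisons of (X & Y) against X below collapse to inequality/equality.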
4970 static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q,
4971 InstCombinerImpl &IC) {
4972 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
4973 // Normalize the 'and' to be operand 0.
4974 CmpInst::Predicate Pred = I.getPredicate();
4975 if (match(Op1, m_c_And(m_Specific(Op0), m_Value()))) {
4976 std::swap(Op0, Op1);
4977 Pred = ICmpInst::getSwappedPredicate(Pred);
4978 }
4979
4980 if (!match(Op0, m_c_And(m_Specific(Op1), m_Value(A))))
4981 return nullptr;
4982
4983 // icmp (X & Y) u< X --> (X & Y) != X
4984 if (Pred == ICmpInst::ICMP_ULT)
4985 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4986 
4987 // icmp (X & Y) u>= X --> (X & Y) == X
4988 if (Pred == ICmpInst::ICMP_UGE)
4989 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4990
4991 if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
4992 // icmp (X & Y) eq/ne Y --> (X | ~Y) eq/ne -1 if Y is freely invertible and
4993 // Y is non-constant. If Y is constant the `X & C == C` form is preferable
4994 // so don't do this fold.
4995 if (!match(Op1, m_ImmConstant()))
4996 if (auto *NotOp1 =
4997 IC.getFreelyInverted(Op1, !Op1->hasNUsesOrMore(3), &IC.Builder))
4998 return new ICmpInst(Pred, IC.Builder.CreateOr(A, NotOp1),
4999 Constant::getAllOnesValue(Op1->getType()));
5000 // icmp (X & Y) eq/ne Y --> (~X & Y) eq/ne 0 if X is freely invertible.
5001 if (auto *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5002 return new ICmpInst(Pred, IC.Builder.CreateAnd(Op1, NotA),
5003 Constant::getNullValue(Op1->getType()));
5004 }
5005
5006 if (!ICmpInst::isSigned(Pred))
5007 return nullptr;
5008
5009 KnownBits KnownY = IC.computeKnownBits(A, &I);
5010 // (X & NegY) spred X --> (X & NegY) upred X
5011 if (KnownY.isNegative())
5012 return new ICmpInst(ICmpInst::getUnsignedPredicate(Pred), Op0, Op1);
5013
5014 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGT)
5015 return nullptr;
5016
5017 if (KnownY.isNonNegative())
5018 // (X & PosY) s<= X --> X s>= 0
5019 // (X & PosY) s> X --> X s< 0
5020 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
5021 Constant::getNullValue(Op1->getType()));
5022
5023 if (isKnownNegative(Op1, Q))
5024 // (NegX & Y) s<= NegX --> Y s< 0
5025 // (NegX & Y) s> NegX --> Y s>= 0
5026 return new ICmpInst(ICmpInst::getStrictPredicate(Pred), A,
5027 Constant::getNullValue(A->getType()));
5028
5029 return nullptr;
5030}
5031
5032 static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q,
5033 InstCombinerImpl &IC) {
5034 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5035
5036 // Normalize the 'or' to be operand 0.
5037 CmpInst::Predicate Pred = I.getPredicate();
5038 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value(A)))) {
5039 std::swap(Op0, Op1);
5040 Pred = ICmpInst::getSwappedPredicate(Pred);
5041 } else if (!match(Op0, m_c_Or(m_Specific(Op1), m_Value(A)))) {
5042 return nullptr;
5043 }
5044
5045 // icmp (X | Y) u<= X --> (X | Y) == X
5046 if (Pred == ICmpInst::ICMP_ULE)
5047 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5048
5049 // icmp (X | Y) u> X --> (X | Y) != X
5050 if (Pred == ICmpInst::ICMP_UGT)
5051 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5052
5053 if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
5054 // icmp (X | Y) eq/ne Y --> (X & ~Y) eq/ne 0 if Y is freely invertible
5055 if (Value *NotOp1 = IC.getFreelyInverted(
5056 Op1, !isa<Constant>(Op1) && !Op1->hasNUsesOrMore(3), &IC.Builder))
5057 return new ICmpInst(Pred, IC.Builder.CreateAnd(A, NotOp1),
5058 Constant::getNullValue(Op1->getType()));
5059 // icmp (X | Y) eq/ne Y --> (~X | Y) eq/ne -1 if X is freely invertible.
5060 if (Value *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5061 return new ICmpInst(Pred, IC.Builder.CreateOr(Op1, NotA),
5062 Constant::getAllOnesValue(Op1->getType()));
5063 }
5064 return nullptr;
5065}
5066
5067 static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q,
5068 InstCombinerImpl &IC) {
5069 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5071 // Normalize the 'xor' to be operand 0.
5071 CmpInst::Predicate Pred = I.getPredicate();
5072 if (match(Op1, m_c_Xor(m_Specific(Op0), m_Value()))) {
5073 std::swap(Op0, Op1);
5074 Pred = ICmpInst::getSwappedPredicate(Pred);
5075 }
5076 if (!match(Op0, m_c_Xor(m_Specific(Op1), m_Value(A))))
5077 return nullptr;
5078
5079 // icmp (X ^ Y_NonZero) u>= X --> icmp (X ^ Y_NonZero) u> X
5080 // icmp (X ^ Y_NonZero) u<= X --> icmp (X ^ Y_NonZero) u< X
5081 // icmp (X ^ Y_NonZero) s>= X --> icmp (X ^ Y_NonZero) s> X
5082 // icmp (X ^ Y_NonZero) s<= X --> icmp (X ^ Y_NonZero) s< X
5083 CmpInst::Predicate PredOut = CmpInst::getStrictPredicate(Pred);
5084 if (PredOut != Pred && isKnownNonZero(A, Q))
5085 return new ICmpInst(PredOut, Op0, Op1);
5086
5087 // These transforms work when A is negative.
5088 // X s< X^A, X s<= X^A, X u> X^A, X u>= X^A --> X s< 0
5089 // X s> X^A, X s>= X^A, X u< X^A, X u<= X^A --> X s>= 0
5090 if (match(A, m_Negative())) {
5091 CmpInst::Predicate NewPred;
5092 switch (ICmpInst::getStrictPredicate(Pred)) {
5093 default:
5094 return nullptr;
5095 case ICmpInst::ICMP_SLT:
5096 case ICmpInst::ICMP_UGT:
5097 NewPred = ICmpInst::ICMP_SLT;
5098 break;
5099 case ICmpInst::ICMP_SGT:
5100 case ICmpInst::ICMP_ULT:
5101 NewPred = ICmpInst::ICMP_SGE;
5102 break;
5103 }
5104 Constant *Const = Constant::getNullValue(Op0->getType());
5105 return new ICmpInst(NewPred, Op0, Const);
5106 }
5107
5108 return nullptr;
5109}
5110
5111/// Return true if X is a multiple of C.
5112/// TODO: Handle non-power-of-2 factors.
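/// For a power-of-2 C this is a known-bits query (illustrative annotation):
/// X is a multiple of C exactly when the low log2(C) bits of X are zero,
/// e.g. X is a multiple of 8 iff MaskedValueIsZero(X, 7).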
5113static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q) {
5114 if (C.isOne())
5115 return true;
5116
5117 if (!C.isPowerOf2())
5118 return false;
5119
5120 return MaskedValueIsZero(X, C - 1, Q);
5121}
5122
5123/// Try to fold icmp (binop), X or icmp X, (binop).
5124/// TODO: A large part of this logic is duplicated in InstSimplify's
5125/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
5126/// duplication.
5127 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
5128 const SimplifyQuery &SQ) {
5129 const SimplifyQuery Q = SQ.getWithInstruction(&I);
5130 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5131
5132 // Special logic for binary operators.
5133 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
5134 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
5135 if (!BO0 && !BO1)
5136 return nullptr;
5137
5138 if (Instruction *NewICmp = foldICmpXNegX(I, Builder))
5139 return NewICmp;
5140
5141 const CmpInst::Predicate Pred = I.getPredicate();
5142 Value *X;
5143
5144 // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
5145 // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
5146 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
5147 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5148 return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
5149 // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
5150 if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
5151 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5152 return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
5153
5154 {
5155 // (Op1 + X) + C u</u>= Op1 --> ~C - X u</u>= Op1
5156 Constant *C;
5157 if (match(Op0, m_OneUse(m_Add(m_c_Add(m_Specific(Op1), m_Value(X)),
5158 m_ImmConstant(C)))) &&
5159 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
5160 Constant *C2 = ConstantExpr::getNot(C);
5161 return new ICmpInst(Pred, Builder.CreateSub(C2, X), Op1);
5162 }
5163 // Op0 u>/u<= (Op0 + X) + C --> Op0 u>/u<= ~C - X
5164 if (match(Op1, m_OneUse(m_Add(m_c_Add(m_Specific(Op0), m_Value(X)),
5165 m_ImmConstant(C)))) &&
5166 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) {
5167 Constant *C2 = ConstantExpr::getNot(C);
5168 return new ICmpInst(Pred, Op0, Builder.CreateSub(C2, X));
5169 }
5170 }
5171
5172 // (icmp eq/ne (and X, -P2), INT_MIN)
5173 // -> (icmp slt/sge X, INT_MIN + P2)
5174 if (ICmpInst::isEquality(Pred) && BO0 &&
5175 match(I.getOperand(1), m_SignMask()) &&
5176 match(BO0, m_And(m_Value(), m_NegatedPower2OrZero()))) {
5177 // Will Constant fold.
5178 Value *NewC = Builder.CreateSub(I.getOperand(1), BO0->getOperand(1));
5179 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SLT
5180 : ICmpInst::ICMP_SGE,
5181 BO0->getOperand(0), NewC);
5182 }
5183
5184 {
5185 // Similar to above: an unsigned overflow comparison may use offset + mask:
5186 // ((Op1 + C) & C) u< Op1 --> Op1 != 0
5187 // ((Op1 + C) & C) u>= Op1 --> Op1 == 0
5188 // Op0 u> ((Op0 + C) & C) --> Op0 != 0
5189 // Op0 u<= ((Op0 + C) & C) --> Op0 == 0
5190 BinaryOperator *BO;
5191 const APInt *C;
5192 if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
5193 match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5194 match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowPoison(*C)))) {
5195 CmpInst::Predicate NewPred =
5196 Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5197 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5198 return new ICmpInst(NewPred, Op1, Zero);
5199 }
5200
5201 if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5202 match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5203 match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowPoison(*C)))) {
5204 CmpInst::Predicate NewPred =
5205 Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5206 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5207 return new ICmpInst(NewPred, Op0, Zero);
5208 }
5209 }
5210
5211 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
5212 bool Op0HasNUW = false, Op1HasNUW = false;
5213 bool Op0HasNSW = false, Op1HasNSW = false;
5214 // Analyze the case when either Op0 or Op1 is an add instruction.
5215 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
5216 auto hasNoWrapProblem = [](const BinaryOperator &BO, CmpInst::Predicate Pred,
5217 bool &HasNSW, bool &HasNUW) -> bool {
5218 if (isa<OverflowingBinaryOperator>(BO)) {
5219 HasNUW = BO.hasNoUnsignedWrap();
5220 HasNSW = BO.hasNoSignedWrap();
5221 return ICmpInst::isEquality(Pred) ||
5222 (CmpInst::isUnsigned(Pred) && HasNUW) ||
5223 (CmpInst::isSigned(Pred) && HasNSW);
5224 } else if (BO.getOpcode() == Instruction::Or) {
5225 HasNUW = true;
5226 HasNSW = true;
5227 return true;
5228 } else {
5229 return false;
5230 }
5231 };
5232 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
5233
5234 if (BO0) {
5235 match(BO0, m_AddLike(m_Value(A), m_Value(B)));
5236 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5237 }
5238 if (BO1) {
5239 match(BO1, m_AddLike(m_Value(C), m_Value(D)));
5240 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5241 }
5242
5243 // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
5244 // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
5245 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
5246 return new ICmpInst(Pred, A == Op1 ? B : A,
5247 Constant::getNullValue(Op1->getType()));
5248
5249 // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
5250 // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
5251 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
5252 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
5253 C == Op0 ? D : C);
5254
5255 // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
5256 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
5257 NoOp1WrapProblem) {
5258 // Determine Y and Z in the form icmp (X+Y), (X+Z).
5259 Value *Y, *Z;
5260 if (A == C) {
5261 // C + B == C + D -> B == D
5262 Y = B;
5263 Z = D;
5264 } else if (A == D) {
5265 // D + B == C + D -> B == C
5266 Y = B;
5267 Z = C;
5268 } else if (B == C) {
5269 // A + C == C + D -> A == D
5270 Y = A;
5271 Z = D;
5272 } else {
5273 assert(B == D);
5274 // A + D == C + D -> A == C
5275 Y = A;
5276 Z = C;
5277 }
5278 return new ICmpInst(Pred, Y, Z);
5279 }
5280
5281 if (ICmpInst::isRelational(Pred)) {
5282 // Return true if both X and Y are divisible by Z (or -Z when IsNegative).
5283 // TODO: Generalize to check if (X - Y) is divisible by Z/-Z.
5284 auto ShareCommonDivisor = [&Q](Value *X, Value *Y, Value *Z,
5285 bool IsNegative) -> bool {
5286 const APInt *OffsetC;
5287 if (!match(Z, m_APInt(OffsetC)))
5288 return false;
5289
5290 // Fast path for Z == 1/-1.
5291 if (IsNegative ? OffsetC->isAllOnes() : OffsetC->isOne())
5292 return true;
5293
5294 APInt C = *OffsetC;
5295 if (IsNegative)
5296 C.negate();
5297 // Note: -INT_MIN is also negative.
5298 if (!C.isStrictlyPositive())
5299 return false;
5300
5301 return isMultipleOf(X, C, Q) && isMultipleOf(Y, C, Q);
5302 };
5303
5304 // TODO: The subtraction-related identities shown below also hold, but
5305 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
5306 // wouldn't happen even if they were implemented.
5307 //
5308 // icmp ult (A - 1), Op1 -> icmp ule A, Op1
5309 // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
5310 // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
5311 // icmp ule Op0, (C - 1) -> icmp ult Op0, C
5312
5313 // icmp slt (A + -1), Op1 -> icmp sle A, Op1
5314 // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
5315 // icmp sle (A + 1), Op1 -> icmp slt A, Op1
5316 // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
5317 // icmp ule (A + 1), Op1 -> icmp ult A, Op1
5318 // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
5319 if (A && NoOp0WrapProblem &&
5320 ShareCommonDivisor(A, Op1, B,
5321 ICmpInst::isLT(Pred) || ICmpInst::isGE(Pred)))
5322 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), A,
5323 Op1);
5324
5325 // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
5326 // icmp sle Op0, (C + -1) -> icmp slt Op0, C
5327 // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
5328 // icmp slt Op0, (C + 1) -> icmp sle Op0, C
5329 // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
5330 // icmp ult Op0, (C + 1) -> icmp ule Op0, C
5331 if (C && NoOp1WrapProblem &&
5332 ShareCommonDivisor(Op0, C, D,
5333 ICmpInst::isGT(Pred) || ICmpInst::isLE(Pred)))
5334 return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), Op0,
5335 C);
5336 }
5337
5338 // if C1 has greater magnitude than C2:
5339 // icmp (A + C1), (C + C2) -> icmp (A + C3), C
5340 // s.t. C3 = C1 - C2
5341 //
5342 // if C2 has greater magnitude than C1:
5343 // icmp (A + C1), (C + C2) -> icmp A, (C + C3)
5344 // s.t. C3 = C2 - C1
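// For example (illustrative annotation, not from the upstream source):
// with C1 = 8 and C2 = 3, "icmp slt (A + 8), (C + 3)" becomes
// "icmp slt (A + 5), C".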
5345 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
5346 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) {
5347 const APInt *AP1, *AP2;
5348 // TODO: Support non-uniform vectors.
5349 // TODO: Allow poison passthrough if B or D's element is poison.
5350 if (match(B, m_APIntAllowPoison(AP1)) &&
5351 match(D, m_APIntAllowPoison(AP2)) &&
5352 AP1->isNegative() == AP2->isNegative()) {
5353 APInt AP1Abs = AP1->abs();
5354 APInt AP2Abs = AP2->abs();
5355 if (AP1Abs.uge(AP2Abs)) {
5356 APInt Diff = *AP1 - *AP2;
5357 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5358 Value *NewAdd = Builder.CreateAdd(
5359 A, C3, "", Op0HasNUW && Diff.ule(*AP1), Op0HasNSW);
5360 return new ICmpInst(Pred, NewAdd, C);
5361 } else {
5362 APInt Diff = *AP2 - *AP1;
5363 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5364 Value *NewAdd = Builder.CreateAdd(
5365 C, C3, "", Op1HasNUW && Diff.ule(*AP2), Op1HasNSW);
5366 return new ICmpInst(Pred, A, NewAdd);
5367 }
5368 }
5369 Constant *Cst1, *Cst2;
5370 if (match(B, m_ImmConstant(Cst1)) && match(D, m_ImmConstant(Cst2)) &&
5371 ICmpInst::isEquality(Pred)) {
5372 Constant *Diff = ConstantExpr::getSub(Cst2, Cst1);
5373 Value *NewAdd = Builder.CreateAdd(C, Diff);
5374 return new ICmpInst(Pred, A, NewAdd);
5375 }
5376 }
5377
5378 // Analyze the case when either Op0 or Op1 is a sub instruction.
5379 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
5380 A = nullptr;
5381 B = nullptr;
5382 C = nullptr;
5383 D = nullptr;
5384 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
5385 A = BO0->getOperand(0);
5386 B = BO0->getOperand(1);
5387 }
5388 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
5389 C = BO1->getOperand(0);
5390 D = BO1->getOperand(1);
5391 }
5392
5393 // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
5394 if (A == Op1 && NoOp0WrapProblem)
5395 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
5396 // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
5397 if (C == Op0 && NoOp1WrapProblem)
5398 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
5399
5400 // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
5401 // (A - B) u>/u<= A --> B u>/u<= A
5402 if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5403 return new ICmpInst(Pred, B, A);
5404 // C u</u>= (C - D) --> C u</u>= D
5405 if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5406 return new ICmpInst(Pred, C, D);
5407 // (A - B) u>=/u< A --> B u>/u<= A iff B != 0
5408 if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5409 isKnownNonZero(B, Q))
5410 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
5411 // C u<=/u> (C - D) --> C u</u>= D iff D != 0
5412 if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
5413 isKnownNonZero(D, Q))
5414 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
5415 
5416 // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
5417 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
5418 return new ICmpInst(Pred, A, C);
5419
5420 // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
5421 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
5422 return new ICmpInst(Pred, D, B);
5423
5424 // icmp (0-X) < cst --> x > -cst
5425 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
5426 Value *X;
5427 if (match(BO0, m_Neg(m_Value(X))))
5428 if (Constant *RHSC = dyn_cast<Constant>(Op1))
5429 if (RHSC->isNotMinSignedValue())
5430 return new ICmpInst(I.getSwappedPredicate(), X,
5431 ConstantExpr::getNeg(RHSC));
5432 }
5433
5434 if (Instruction *R = foldICmpXorXX(I, Q, *this))
5435 return R;
5436 if (Instruction *R = foldICmpOrXX(I, Q, *this))
5437 return R;
5438
5439 {
5440 // Try to remove shared multiplier from comparison:
5441 // X * Z pred Y * Z
5442 Value *X, *Y, *Z;
5443 if ((match(Op0, m_Mul(m_Value(X), m_Value(Z))) &&
5444 match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y)))) ||
5445 (match(Op0, m_Mul(m_Value(Z), m_Value(X))) &&
5446 match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y))))) {
5447 if (ICmpInst::isSigned(Pred)) {
5448 if (Op0HasNSW && Op1HasNSW) {
5449 KnownBits ZKnown = computeKnownBits(Z, &I);
5450 if (ZKnown.isStrictlyPositive())
5451 return new ICmpInst(Pred, X, Y);
5452 if (ZKnown.isNegative())
5453 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), X, Y);
5454 Value *LessThan = simplifyICmpInst(ICmpInst::ICMP_SLT, X, Y,
5455 SQ.getWithInstruction(&I));
5456 if (LessThan && match(LessThan, m_One()))
5457 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Z,
5458 Constant::getNullValue(Z->getType()));
5459 Value *GreaterThan = simplifyICmpInst(ICmpInst::ICMP_SGT, X, Y,
5460 SQ.getWithInstruction(&I));
5461 if (GreaterThan && match(GreaterThan, m_One()))
5462 return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5463 }
5464 } else {
5465 bool NonZero;
5466 if (ICmpInst::isEquality(Pred)) {
5467 // If X != Y, fold (X *nw Z) eq/ne (Y *nw Z) -> Z eq/ne 0
5468 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5469 isKnownNonEqual(X, Y, SQ))
5470 return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5471
5472 KnownBits ZKnown = computeKnownBits(Z, &I);
5473 // if Z % 2 != 0
5474 // X * Z eq/ne Y * Z -> X eq/ne Y
5475 if (ZKnown.countMaxTrailingZeros() == 0)
5476 return new ICmpInst(Pred, X, Y);
5477 NonZero = !ZKnown.One.isZero() || isKnownNonZero(Z, Q);
5478 // if Z != 0 and nsw(X * Z) and nsw(Y * Z)
5479 // X * Z eq/ne Y * Z -> X eq/ne Y
5480 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5481 return new ICmpInst(Pred, X, Y);
5482 } else
5483 NonZero = isKnownNonZero(Z, Q);
5484
5485 // If Z != 0 and nuw(X * Z) and nuw(Y * Z)
5486 // X * Z u{lt/le/gt/ge}/eq/ne Y * Z -> X u{lt/le/gt/ge}/eq/ne Y
5487 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5488 return new ICmpInst(Pred, X, Y);
5489 }
5490 }
5491 }
5492
5493 BinaryOperator *SRem = nullptr;
5494 // icmp (srem X, Y), Y
5495 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
5496 SRem = BO0;
5497 // icmp Y, (srem X, Y)
5498 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
5499 Op0 == BO1->getOperand(1))
5500 SRem = BO1;
5501 if (SRem) {
5502 // We don't check hasOneUse to avoid increasing register pressure because
5503 // the value we use is the same value this instruction was already using.
5504 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
5505 default:
5506 break;
5507 case ICmpInst::ICMP_EQ:
5508 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5509 case ICmpInst::ICMP_NE:
5510 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5511 case ICmpInst::ICMP_SGT:
5512 case ICmpInst::ICMP_SGE:
5513 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
5514 Constant::getAllOnesValue(SRem->getType()));
5515 case ICmpInst::ICMP_SLT:
5516 case ICmpInst::ICMP_SLE:
5517 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
5518 Constant::getNullValue(SRem->getType()));
5519 }
5520 }
5521
5522 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
5523 (BO0->hasOneUse() || BO1->hasOneUse()) &&
5524 BO0->getOperand(1) == BO1->getOperand(1)) {
5525 switch (BO0->getOpcode()) {
5526 default:
5527 break;
5528 case Instruction::Add:
5529 case Instruction::Sub:
5530 case Instruction::Xor: {
5531 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
5532 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5533
5534 const APInt *C;
5535 if (match(BO0->getOperand(1), m_APInt(C))) {
5536 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
5537 if (C->isSignMask()) {
5538 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5539 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5540 }
5541
5542 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
5543 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
5544 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5545 NewPred = I.getSwappedPredicate(NewPred);
5546 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5547 }
5548 }
5549 break;
5550 }
5551 case Instruction::Mul: {
5552 if (!I.isEquality())
5553 break;
5554
5555 const APInt *C;
5556 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isZero() &&
5557 !C->isOne()) {
5558 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
5559 // Mask = -1 >> count-trailing-zeros(C).
5560 if (unsigned TZs = C->countr_zero()) {
5561 Constant *Mask = ConstantInt::get(
5562 BO0->getType(),
5563 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
5564 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
5565 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
5566 return new ICmpInst(Pred, And1, And2);
5567 }
5568 }
5569 break;
5570 }
5571 case Instruction::UDiv:
5572 case Instruction::LShr:
5573 if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
5574 break;
5575 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5576
5577 case Instruction::SDiv:
5578 if (!(I.isEquality() || match(BO0->getOperand(1), m_NonNegative())) ||
5579 !BO0->isExact() || !BO1->isExact())
5580 break;
5581 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5582
5583 case Instruction::AShr:
5584 if (!BO0->isExact() || !BO1->isExact())
5585 break;
5586 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5587
5588 case Instruction::Shl: {
5589 bool NUW = Op0HasNUW && Op1HasNUW;
5590 bool NSW = Op0HasNSW && Op1HasNSW;
5591 if (!NUW && !NSW)
5592 break;
5593 if (!NSW && I.isSigned())
5594 break;
5595 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5596 }
5597 }
5598 }
5599
5600 if (BO0) {
5601 // Transform A & (L - 1) `ult` L --> L != 0
5602 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
5603 auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
5604
5605 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
5606 auto *Zero = Constant::getNullValue(BO0->getType());
5607 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
5608 }
5609 }
5610
5611 // For unsigned predicates / eq / ne:
5612 // icmp pred (x << 1), x --> icmp getSignedPredicate(pred) x, 0
5613 // icmp pred x, (x << 1) --> icmp getSignedPredicate(pred) 0, x
5614 if (!ICmpInst::isSigned(Pred)) {
5615 if (match(Op0, m_Shl(m_Specific(Op1), m_One())))
5616 return new ICmpInst(ICmpInst::getSignedPredicate(Pred), Op1,
5617 Constant::getNullValue(Op1->getType()));
5618 else if (match(Op1, m_Shl(m_Specific(Op0), m_One())))
5619 return new ICmpInst(ICmpInst::getSignedPredicate(Pred),
5620 Constant::getNullValue(Op0->getType()), Op0);
5621 }
5622 
5623 if (Value *V = foldMultiplicationOverflowCheck(I))
5624 return replaceInstUsesWith(I, V);
5625
5626 if (Instruction *R = foldICmpAndXX(I, Q, *this))
5627 return R;
5628 
5629 if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
5630 return replaceInstUsesWith(I, V);
5631 
5632 if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
5633 return replaceInstUsesWith(I, V);
5634
5635 return nullptr;
5636}
5637
5638 /// Fold icmp Pred min|max(X, Y), Z.
5639 Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I,
5640 MinMaxIntrinsic *MinMax,
5641 Value *Z, CmpPredicate Pred) {
5642 Value *X = MinMax->getLHS();
5643 Value *Y = MinMax->getRHS();
5644 if (ICmpInst::isSigned(Pred) && !MinMax->isSigned())
5645 return nullptr;
5646 if (ICmpInst::isUnsigned(Pred) && MinMax->isSigned()) {
5647 // Revert the transform signed pred -> unsigned pred
5648 // TODO: We can flip the signedness of predicate if both operands of icmp
5649 // are negative.
5650 if (isKnownNonNegative(Z, SQ.getWithInstruction(&I)) &&
5651 isKnownNonNegative(MinMax, SQ.getWithInstruction(&I))) {
5652 Pred = ICmpInst::getFlippedSignednessPredicate(Pred);
5653 } else
5654 return nullptr;
5655 }
5656 SimplifyQuery Q = SQ.getWithInstruction(&I);
5657 auto IsCondKnownTrue = [](Value *Val) -> std::optional<bool> {
5658 if (!Val)
5659 return std::nullopt;
5660 if (match(Val, m_One()))
5661 return true;
5662 if (match(Val, m_Zero()))
5663 return false;
5664 return std::nullopt;
5665 };
5666 // Remove samesign here since it is illegal to keep it when we speculatively
5667 // execute comparisons. For example, `icmp samesign ult umax(X, -46), -32`
5668 // cannot be decomposed into `(icmp samesign ult X, -46) or (icmp samesign ult
5669 // -46, -32)`. `X` is allowed to be non-negative here.
5670 Pred = Pred.dropSameSign();
5671 auto CmpXZ = IsCondKnownTrue(simplifyICmpInst(Pred, X, Z, Q));
5672 auto CmpYZ = IsCondKnownTrue(simplifyICmpInst(Pred, Y, Z, Q));
5673 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5674 return nullptr;
5675 if (!CmpXZ.has_value()) {
5676 std::swap(X, Y);
5677 std::swap(CmpXZ, CmpYZ);
5678 }
5679
5680 auto FoldIntoCmpYZ = [&]() -> Instruction * {
5681 if (CmpYZ.has_value())
5682 return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *CmpYZ));
5683 return ICmpInst::Create(Instruction::ICmp, Pred, Y, Z);
5684 };
5685
5686 switch (Pred) {
5687 case ICmpInst::ICMP_EQ:
5688 case ICmpInst::ICMP_NE: {
5689 // If X == Z:
5690 // Expr Result
5691 // min(X, Y) == Z X <= Y
5692 // max(X, Y) == Z X >= Y
5693 // min(X, Y) != Z X > Y
5694 // max(X, Y) != Z X < Y
5695 if ((Pred == ICmpInst::ICMP_EQ) == *CmpXZ) {
5696 ICmpInst::Predicate NewPred =
5697 ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
5698 if (Pred == ICmpInst::ICMP_NE)
5699 NewPred = ICmpInst::getInversePredicate(NewPred);
5700 return ICmpInst::Create(Instruction::ICmp, NewPred, X, Y);
5701 }
5702 // Otherwise (X != Z):
5703 ICmpInst::Predicate NewPred = MinMax->getPredicate();
5704 auto MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5705 if (!MinMaxCmpXZ.has_value()) {
5706 std::swap(X, Y);
5707 std::swap(CmpXZ, CmpYZ);
5708 // Re-check pre-condition X != Z
5709 if (!CmpXZ.has_value() || (Pred == ICmpInst::ICMP_EQ) == *CmpXZ)
5710 break;
5711 MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5712 }
5713 if (!MinMaxCmpXZ.has_value())
5714 break;
5715 if (*MinMaxCmpXZ) {
5716 // Expr Fact Result
5717 // min(X, Y) == Z X < Z false
5718 // max(X, Y) == Z X > Z false
5719 // min(X, Y) != Z X < Z true
5720 // max(X, Y) != Z X > Z true
5721 return replaceInstUsesWith(
5722 I, ConstantInt::getBool(I.getType(), Pred == ICmpInst::ICMP_NE));
5723 } else {
5724 // Expr Fact Result
5725 // min(X, Y) == Z X > Z Y == Z
5726 // max(X, Y) == Z X < Z Y == Z
5727 // min(X, Y) != Z X > Z Y != Z
5728 // max(X, Y) != Z X < Z Y != Z
5729 return FoldIntoCmpYZ();
5730 }
5731 break;
5732 }
5733 case ICmpInst::ICMP_SLT:
5734 case ICmpInst::ICMP_ULT:
5735 case ICmpInst::ICMP_SLE:
5736 case ICmpInst::ICMP_ULE:
5737 case ICmpInst::ICMP_SGT:
5738 case ICmpInst::ICMP_UGT:
5739 case ICmpInst::ICMP_SGE:
5740 case ICmpInst::ICMP_UGE: {
5741 bool IsSame = MinMax->getPredicate() == ICmpInst::getStrictPredicate(Pred);
5742 if (*CmpXZ) {
5743 if (IsSame) {
5744 // Expr Fact Result
5745 // min(X, Y) < Z X < Z true
5746 // min(X, Y) <= Z X <= Z true
5747 // max(X, Y) > Z X > Z true
5748 // max(X, Y) >= Z X >= Z true
5749 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5750 } else {
5751 // Expr Fact Result
5752 // max(X, Y) < Z X < Z Y < Z
5753 // max(X, Y) <= Z X <= Z Y <= Z
5754 // min(X, Y) > Z X > Z Y > Z
5755 // min(X, Y) >= Z X >= Z Y >= Z
5756 return FoldIntoCmpYZ();
5757 }
5758 } else {
5759 if (IsSame) {
5760 // Expr Fact Result
5761 // min(X, Y) < Z X >= Z Y < Z
5762 // min(X, Y) <= Z X > Z Y <= Z
5763 // max(X, Y) > Z X <= Z Y > Z
5764 // max(X, Y) >= Z X < Z Y >= Z
5765 return FoldIntoCmpYZ();
5766 } else {
5767 // Expr Fact Result
5768 // max(X, Y) < Z X >= Z false
5769 // max(X, Y) <= Z X > Z false
5770 // min(X, Y) > Z X <= Z false
5771 // min(X, Y) >= Z X < Z false
5772 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5773 }
5774 }
5775 break;
5776 }
5777 default:
5778 break;
5779 }
5780
5781 return nullptr;
5782}
5783
5784/// Match and fold patterns like:
5785/// icmp eq/ne X, min(max(X, Lo), Hi)
5786 /// which represents a range check and can be represented as a ConstantRange.
5787///
5788/// For icmp eq, build ConstantRange [Lo, Hi + 1) and convert to:
5789/// (X - Lo) u< (Hi + 1 - Lo)
5790/// For icmp ne, build ConstantRange [Hi + 1, Lo) and convert to:
5791/// (X - (Hi + 1)) u< (Lo - (Hi + 1))
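/// For example (illustrative annotation, not from the upstream source):
/// with Lo = 5 and Hi = 10, "icmp eq X, umin(umax(X, 5), 10)" holds exactly
/// when X is in [5, 11), which is emitted as "(X - 5) u< 6".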
5792 Instruction *InstCombinerImpl::foldICmpWithClamp(ICmpInst &I, Value *X,
5793 MinMaxIntrinsic *Min) {
5794 if (!I.isEquality() || !Min->hasOneUse() || !Min->isMin())
5795 return nullptr;
5796
5797 const APInt *Lo = nullptr, *Hi = nullptr;
5798 if (Min->isSigned()) {
5799 if (!match(Min->getLHS(), m_OneUse(m_SMax(m_Specific(X), m_APInt(Lo)))) ||
5800 !match(Min->getRHS(), m_APInt(Hi)) || !Lo->slt(*Hi))
5801 return nullptr;
5802 } else {
5803 if (!match(Min->getLHS(), m_OneUse(m_UMax(m_Specific(X), m_APInt(Lo)))) ||
5804 !match(Min->getRHS(), m_APInt(Hi)) || !Lo->ult(*Hi))
5805 return nullptr;
5806 }
5807
5808 ConstantRange CR = ConstantRange::getNonEmpty(*Lo, *Hi + 1);
5809 CmpInst::Predicate Pred;
5810 APInt C, Offset;
5811 if (I.getPredicate() == ICmpInst::ICMP_EQ)
5812 CR.getEquivalentICmp(Pred, C, Offset);
5813 else
5814 CR.inverse().getEquivalentICmp(Pred, C, Offset);
5815
5816 if (!Offset.isZero())
5817 X = Builder.CreateAdd(X, ConstantInt::get(X->getType(), Offset));
5818
5819 return replaceInstUsesWith(
5820 I, Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), C)));
5821}
5822
5823// Canonicalize checking for a power-of-2-or-zero value:
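// Illustrative note (added annotation, not from the upstream source):
// (A & (A - 1)) clears the lowest set bit of A, so comparing the result
// with zero asks whether A has at most one bit set, i.e. ctpop(A) u< 2.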
5824 static Instruction *foldICmpPow2Test(ICmpInst &I,
5825 InstCombiner::BuilderTy &Builder) {
5826 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5827 const CmpInst::Predicate Pred = I.getPredicate();
5828 Value *A = nullptr;
5829 bool CheckIs;
5830 if (I.isEquality()) {
5831 // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
5832 // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
5833 if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
5834 m_Deferred(A)))) ||
5835 !match(Op1, m_ZeroInt()))
5836 A = nullptr;
5837
5838 // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
5839 // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
5840 if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
5841 A = Op1;
5842 else if (match(Op1,
5843 m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
5844 A = Op0;
5845
5846 CheckIs = Pred == ICmpInst::ICMP_EQ;
5847 } else if (ICmpInst::isUnsigned(Pred)) {
5848 // (A ^ (A-1)) u>= A --> ctpop(A) < 2 (two commuted variants)
5849 // ((A-1) ^ A) u< A --> ctpop(A) > 1 (two commuted variants)
5850
5851 if ((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5852 match(Op0, m_OneUse(m_c_Xor(m_Add(m_Specific(Op1), m_AllOnes()),
5853 m_Specific(Op1))))) {
5854 A = Op1;
5855 CheckIs = Pred == ICmpInst::ICMP_UGE;
5856 } else if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5857 match(Op1, m_OneUse(m_c_Xor(m_Add(m_Specific(Op0), m_AllOnes()),
5858 m_Specific(Op0))))) {
5859 A = Op0;
5860 CheckIs = Pred == ICmpInst::ICMP_ULE;
5861 }
5862 }
5863
5864 if (A) {
5865 Type *Ty = A->getType();
5866 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
5867 return CheckIs ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop,
5868 ConstantInt::get(Ty, 2))
5869 : new ICmpInst(ICmpInst::ICMP_UGT, CtPop,
5870 ConstantInt::get(Ty, 1));
5871 }
5872
5873 return nullptr;
5874}
5875
5876 /// Find all possible pairs (BinOp, RHS) such that `BinOp V, RHS` can be simplified.
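/// For example (illustrative annotation, not from the upstream source):
/// for V = (add X, C) the pair (Sub, C) is recorded, because
/// "(add X, C) - C" simplifies back to X.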
5877 using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5878 static void collectOffsetOp(Value *V, SmallVectorImpl<OffsetOp> &Offsets,
5879 bool AllowRecursion) {
5880 auto *Inst = dyn_cast<Instruction>(V);
5881 if (!Inst || !Inst->hasOneUse())
5882 return;
5883
5884 switch (Inst->getOpcode()) {
5885 case Instruction::Add:
5886 Offsets.emplace_back(Instruction::Sub, Inst->getOperand(1));
5887 Offsets.emplace_back(Instruction::Sub, Inst->getOperand(0));
5888 break;
5889 case Instruction::Sub:
5890 Offsets.emplace_back(Instruction::Add, Inst->getOperand(1));
5891 break;
5892 case Instruction::Xor:
5893 Offsets.emplace_back(Instruction::Xor, Inst->getOperand(1));
5894 Offsets.emplace_back(Instruction::Xor, Inst->getOperand(0));
5895 break;
5896 case Instruction::Shl:
5897 if (Inst->hasNoSignedWrap())
5898 Offsets.emplace_back(Instruction::AShr, Inst->getOperand(1));
5899 if (Inst->hasNoUnsignedWrap())
5900 Offsets.emplace_back(Instruction::LShr, Inst->getOperand(1));
5901 break;
5902 case Instruction::Select:
5903 if (AllowRecursion) {
5904 collectOffsetOp(Inst->getOperand(1), Offsets, /*AllowRecursion=*/false);
5905 collectOffsetOp(Inst->getOperand(2), Offsets, /*AllowRecursion=*/false);
5906 }
5907 break;
5908 default:
5909 break;
5910 }
5911}
5912
5914
5918
5920 return {OffsetKind::Invalid, nullptr, nullptr, nullptr};
5921 }
5923 return {OffsetKind::Value, V, nullptr, nullptr};
5924 }
5925 static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV) {
5926 return {OffsetKind::Select, Cond, TrueV, FalseV};
5927 }
5928 bool isValid() const { return Kind != OffsetKind::Invalid; }
5930 switch (Kind) {
5932 llvm_unreachable("Invalid offset result");
5933 case OffsetKind::Value:
5934 return V0;
5935 case OffsetKind::Select:
5936 return Builder.CreateSelect(V0, V1, V2);
5937 }
5938 llvm_unreachable("Unknown OffsetKind enum");
5939 }
5940};
5941
5942/// Offset both sides of an equality icmp to see if we can save some
5943/// instructions: icmp eq/ne X, Y -> icmp eq/ne X op Z, Y op Z.
5944/// Note: This operation should not introduce poison.
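/// For example (illustrative annotation, not from the upstream source):
/// with the offset (Sub, 4) collected from the left-hand side,
/// "icmp eq (add X, 4), (select %c, i32 4, i32 8)" becomes
/// "icmp eq X, (select %c, i32 0, i32 4)".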
5945 static Instruction *foldICmpEqualityWithOffset(ICmpInst &I,
5946 InstCombiner::BuilderTy &Builder,
5947 const SimplifyQuery &SQ) {
5948 assert(I.isEquality() && "Expected an equality icmp");
5949 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5950 if (!Op0->getType()->isIntOrIntVectorTy())
5951 return nullptr;
5952
5953 SmallVector<OffsetOp, 4> OffsetOps;
5954 collectOffsetOp(Op0, OffsetOps, /*AllowRecursion=*/true);
5955 collectOffsetOp(Op1, OffsetOps, /*AllowRecursion=*/true);
5956
5957 auto ApplyOffsetImpl = [&](Value *V, unsigned BinOpc, Value *RHS) -> Value * {
5958 switch (BinOpc) {
5959 // V = shl nsw X, RHS => X = ashr V, RHS
5960 case Instruction::AShr: {
5961 const APInt *CV, *CRHS;
5962 if (!(match(V, m_APInt(CV)) && match(RHS, m_APInt(CRHS)) &&
5963 CV->ashr(*CRHS).shl(*CRHS) == *CV) &&
5964 !match(V, m_NSWShl(m_Value(), m_Specific(RHS))))
5965 return nullptr;
5966 break;
5967 }
5968 // V = shl nuw X, RHS => X = lshr V, RHS
5969 case Instruction::LShr: {
5970 const APInt *CV, *CRHS;
5971 if (!(match(V, m_APInt(CV)) && match(RHS, m_APInt(CRHS)) &&
5972 CV->lshr(*CRHS).shl(*CRHS) == *CV) &&
5973 !match(V, m_NUWShl(m_Value(), m_Specific(RHS))))
5974 return nullptr;
5975 break;
5976 }
5977 default:
5978 break;
5979 }
5980
5981 Value *Simplified = simplifyBinOp(BinOpc, V, RHS, SQ);
5982 if (!Simplified)
5983 return nullptr;
5984 // Reject constant expressions as they don't simplify things.
5985 if (isa<Constant>(Simplified) && !match(Simplified, m_ImmConstant()))
5986 return nullptr;
5987 // Check if the transformation introduces poison.
5988 return impliesPoison(RHS, V) ? Simplified : nullptr;
5989 };
5990
5991 auto ApplyOffset = [&](Value *V, unsigned BinOpc,
5992 Value *RHS) -> OffsetResult {
5993 if (auto *Sel = dyn_cast<SelectInst>(V)) {
5994 if (!Sel->hasOneUse())
5995 return OffsetResult::invalid();
5996 Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc, RHS);
5997 if (!TrueVal)
5998 return OffsetResult::invalid();
5999 Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc, RHS);
6000 if (!FalseVal)
6001 return OffsetResult::invalid();
6002 return OffsetResult::select(Sel->getCondition(), TrueVal, FalseVal);
6003 }
6004 if (Value *Simplified = ApplyOffsetImpl(V, BinOpc, RHS))
6005 return OffsetResult::value(Simplified);
6006 return OffsetResult::invalid();
6007 };
6008
6009 for (auto [BinOp, RHS] : OffsetOps) {
6010 auto BinOpc = static_cast<unsigned>(BinOp);
6011
6012 auto Op0Result = ApplyOffset(Op0, BinOpc, RHS);
6013 if (!Op0Result.isValid())
6014 continue;
6015 auto Op1Result = ApplyOffset(Op1, BinOpc, RHS);
6016 if (!Op1Result.isValid())
6017 continue;
6018
6019 Value *NewLHS = Op0Result.materialize(Builder);
6020 Value *NewRHS = Op1Result.materialize(Builder);
6021 return new ICmpInst(I.getPredicate(), NewLHS, NewRHS);
6022 }
6023
6024 return nullptr;
6025}
6026
6027 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
6028 if (!I.isEquality())
6029 return nullptr;
6030
6031 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6032 const CmpInst::Predicate Pred = I.getPredicate();
6033 Value *A, *B, *C, *D;
6034 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
6035 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6036 Value *OtherVal = A == Op1 ? B : A;
6037 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6038 }
6039
6040 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
6041 // A^c1 == C^c2 --> A == C^(c1^c2)
6042 ConstantInt *C1, *C2;
6043 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
6044 Op1->hasOneUse()) {
6045 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
6046 Value *Xor = Builder.CreateXor(C, NC);
6047 return new ICmpInst(Pred, A, Xor);
6048 }
6049
6050 // A^B == A^D -> B == D
6051 if (A == C)
6052 return new ICmpInst(Pred, B, D);
6053 if (A == D)
6054 return new ICmpInst(Pred, B, C);
6055 if (B == C)
6056 return new ICmpInst(Pred, A, D);
6057 if (B == D)
6058 return new ICmpInst(Pred, A, C);
6059 }
6060 }
6061
6062 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
6063 // A == (A^B) -> B == 0
6064 Value *OtherVal = A == Op0 ? B : A;
6065 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6066 }
6067
6068 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6069 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
6070 match(Op1, m_And(m_Value(C), m_Value(D)))) {
6071 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
6072
6073 if (A == C) {
6074 X = B;
6075 Y = D;
6076 Z = A;
6077 } else if (A == D) {
6078 X = B;
6079 Y = C;
6080 Z = A;
6081 } else if (B == C) {
6082 X = A;
6083 Y = D;
6084 Z = B;
6085 } else if (B == D) {
6086 X = A;
6087 Y = C;
6088 Z = B;
6089 }
6090
6091 if (X) {
6092 // If X^Y is a negative power of two, then `icmp eq/ne (Z & NegP2), 0`
6093 // will fold to `icmp ult/uge Z, -NegP2`, incurring no additional
6094 // instructions.
6095 const APInt *C0, *C1;
6096 bool XorIsNegP2 = match(X, m_APInt(C0)) && match(Y, m_APInt(C1)) &&
6097 (*C0 ^ *C1).isNegatedPowerOf2();
6098
6099 // If either Op0/Op1 are both one use or X^Y will constant fold and one of
6100 // Op0/Op1 are one use, proceed. In those cases we are instruction neutral
6101 // but `icmp eq/ne A, 0` is easier to analyze than `icmp eq/ne A, B`.
6102 int UseCnt =
6103 int(Op0->hasOneUse()) + int(Op1->hasOneUse()) +
6104 (int(match(X, m_ImmConstant()) && match(Y, m_ImmConstant())));
6105 if (XorIsNegP2 || UseCnt >= 2) {
6106 // Build (X^Y) & Z
6107 Op1 = Builder.CreateXor(X, Y);
6108 Op1 = Builder.CreateAnd(Op1, Z);
6109 return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
6110 }
6111 }
6112 }
6113
6114 {
6115 // Similar to above, but specialized for constant because invert is needed:
6116 // (X | C) == (Y | C) --> (X ^ Y) & ~C == 0
6117 Value *X, *Y;
6118 Constant *C;
6119 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_Constant(C)))) &&
6120 match(Op1, m_OneUse(m_Or(m_Value(Y), m_Specific(C))))) {
6121 Value *Xor = Builder.CreateXor(X, Y);
6122 Value *And = Builder.CreateAnd(Xor, ConstantExpr::getNot(C));
6123 return new ICmpInst(Pred, And, Constant::getNullValue(And->getType()));
6124 }
6125 }
6126
6127 if (match(Op1, m_ZExt(m_Value(A))) &&
6128 (Op0->hasOneUse() || Op1->hasOneUse())) {
6129 // (B & (Pow2C-1)) == zext A --> A == trunc B
6130 // (B & (Pow2C-1)) != zext A --> A != trunc B
6131 const APInt *MaskC;
6132 if (match(Op0, m_And(m_Value(B), m_LowBitMask(MaskC))) &&
6133 MaskC->countr_one() == A->getType()->getScalarSizeInBits())
6134 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
6135 }
6136
6137 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
6138 // For lshr and ashr pairs.
6139 const APInt *AP1, *AP2;
6140 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6141 match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowPoison(AP2))))) ||
6142 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6143 match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowPoison(AP2)))))) {
6144 if (*AP1 != *AP2)
6145 return nullptr;
6146 unsigned TypeBits = AP1->getBitWidth();
6147 unsigned ShAmt = AP1->getLimitedValue(TypeBits);
6148 if (ShAmt < TypeBits && ShAmt != 0) {
6149 ICmpInst::Predicate NewPred =
6150 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
6151 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6152 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
6153 return new ICmpInst(NewPred, Xor, ConstantInt::get(A->getType(), CmpVal));
6154 }
6155 }
6156
6157 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
6158 ConstantInt *Cst1;
6159 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
6160 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
6161 unsigned TypeBits = Cst1->getBitWidth();
6162 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
6163 if (ShAmt < TypeBits && ShAmt != 0) {
6164 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6165 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
6166 Value *And =
6167 Builder.CreateAnd(Xor, Builder.getInt(AndVal), I.getName() + ".mask");
6168 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
6169 }
6170 }
6171
6172 // Transform "icmp eq (trunc (lshr X, cst1)), cst" to
6173 // "icmp (and X, mask), cst"
6174 uint64_t ShAmt = 0;
6175 if (Op0->hasOneUse() &&
6176 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
6177 match(Op1, m_ConstantInt(Cst1)) &&
6178 // Only do this when A has multiple uses. This is most important to do
6179 // when it exposes other optimizations.
6180 !A->hasOneUse()) {
6181 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
6182
6183 if (ShAmt < ASize) {
6184 APInt MaskV =
6185 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
6186 MaskV <<= ShAmt;
6187
6188 APInt CmpV = Cst1->getValue().zext(ASize);
6189 CmpV <<= ShAmt;
6190
6191 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
6192 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
6193 }
6194 }
6195
6196 if (Instruction *ICmp = foldICmpIntrinsicWithIntrinsic(I, Builder))
6197 return ICmp;
6198
6199 // Match icmp eq (trunc (lshr A, BW)), (ashr (trunc A), BW-1), which checks
6200 // that the top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s
6201 // INT_MAX", which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a
6202 // few steps of instcombine.
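// For example (illustrative annotation, not from the upstream source):
// for i64 %a and BW = 32, this emits
// "icmp ult (add i64 %a, 0x80000000), 0x100000000", i.e. a signed-range
// check that %a fits in i32.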
6203 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6204 if (match(Op0, m_AShr(m_Trunc(m_Value(A)), m_SpecificInt(BitWidth - 1))) &&
6205 match(Op1, m_Trunc(m_LShr(m_Specific(A), m_SpecificInt(BitWidth)))) &&
6206 A->getType()->getScalarSizeInBits() == BitWidth * 2 &&
6207 (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse())) {
6208 APInt C = APInt::getOneBitSet(BitWidth * 2, BitWidth - 1);
6209 Value *Add = Builder.CreateAdd(A, ConstantInt::get(A->getType(), C));
6210 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT
6211 : ICmpInst::ICMP_UGE,
6212 Add, ConstantInt::get(A->getType(), C.shl(1)));
6213 }
6214
6215 // Canonicalize:
6216 // Assume B_Pow2 != 0
6217 // 1. A & B_Pow2 != B_Pow2 -> A & B_Pow2 == 0
6218 // 2. A & B_Pow2 == B_Pow2 -> A & B_Pow2 != 0
6219 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())) &&
6220 isKnownToBeAPowerOfTwo(Op1, /* OrZero */ false, &I))
6221 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
6222 ConstantInt::getNullValue(Op0->getType()));
6223 
6224 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())) &&
6225 isKnownToBeAPowerOfTwo(Op0, /* OrZero */ false, &I))
6226 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op1,
6227 ConstantInt::getNullValue(Op1->getType()));
6228
6229 // Canonicalize:
6230 // icmp eq/ne X, OneUse(rotate-right(X))
6231 // -> icmp eq/ne X, rotate-left(X)
6232 // We generally try to convert rotate-right -> rotate-left, this just
6233 // canonicalizes another case.
6234 if (match(&I, m_c_ICmp(m_Value(A),
6235 m_OneUse(m_Intrinsic<Intrinsic::fshr>(
6236 m_Deferred(A), m_Deferred(A), m_Value(B))))))
6237 return new ICmpInst(
6238 Pred, A,
6239 Builder.CreateIntrinsic(Op0->getType(), Intrinsic::fshl, {A, A, B}));
6240
6241 // Canonicalize:
6242 // icmp eq/ne OneUse(A ^ Cst), B --> icmp eq/ne (A ^ B), Cst
6243 Constant *Cst;
6244 if (match(&I, m_c_ICmp(m_OneUse(m_Xor(m_Value(A), m_ImmConstant(Cst))),
6245 m_CombineAnd(m_Value(B), m_Unless(m_ImmConstant())))))
6246 return new ICmpInst(Pred, Builder.CreateXor(A, B), Cst);
6247
6248 {
6249 // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6250 auto m_Matcher =
6251 m_CombineOr(m_CombineOr(m_c_Add(m_Value(B), m_Deferred(A)),
6252 m_c_Xor(m_Value(B), m_Deferred(A))),
6253 m_Sub(m_Value(B), m_Deferred(A)));
6254 std::optional<bool> IsZero = std::nullopt;
6255 if (match(&I, m_c_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)),
6256 m_Deferred(A))))
6257 IsZero = false;
6258 // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6259 else if (match(&I,
6260 m_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)), m_Zero())))
6261 IsZero = true;
6262
6263 if (IsZero && isKnownToBeAPowerOfTwo(A, /* OrZero */ true, &I))
6264 // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6265 // -> (icmp eq/ne (and X, P2), 0)
6266 // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6267 // -> (icmp eq/ne (and X, P2), P2)
6268 return new ICmpInst(Pred, Builder.CreateAnd(B, A),
6269 *IsZero ? A
6270 : ConstantInt::getNullValue(A->getType()));
6271 }
6272
6273 if (auto *Res = foldICmpEqualityWithOffset(
6274 I, Builder, getSimplifyQuery().getWithInstruction(&I)))
6275 return Res;
6276
6277 return nullptr;
6278}
6279
6280 Instruction *InstCombinerImpl::foldICmpWithTrunc(ICmpInst &ICmp) {
6281 ICmpInst::Predicate Pred = ICmp.getPredicate();
6282 Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1);
6283
6284 // Try to canonicalize trunc + compare-to-constant into a mask + cmp.
6285 // The trunc masks high bits while the compare may effectively mask low bits.
6286 Value *X;
6287 const APInt *C;
6288 if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C)))
6289 return nullptr;
6290
6291 // This matches patterns corresponding to tests of the signbit as well as:
6292 // (trunc X) pred C2 --> (X & Mask) == C
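// Example (illustrative): icmp ult (trunc i32 %x to i8), 8
// --> icmp eq (and i32 %x, 0xf8), 0 (a bit test of bits 3..7 of the
// low byte).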
6293 if (auto Res = decomposeBitTestICmp(Op0, Op1, Pred, /*WithTrunc=*/true,
6294 /*AllowNonZeroC=*/true)) {
6295 Value *And = Builder.CreateAnd(Res->X, Res->Mask);
6296 Constant *C = ConstantInt::get(Res->X->getType(), Res->C);
6297 return new ICmpInst(Res->Pred, And, C);
6298 }
6299
6300 unsigned SrcBits = X->getType()->getScalarSizeInBits();
6301 if (auto *II = dyn_cast<IntrinsicInst>(X)) {
6302 if (II->getIntrinsicID() == Intrinsic::cttz ||
6303 II->getIntrinsicID() == Intrinsic::ctlz) {
6304 unsigned MaxRet = SrcBits;
6305 // If the "is_zero_poison" argument is set, then we know at least
6306 // one bit is set in the input, so the result is always at least one
6307 // less than the full bitwidth of that input.
6308 if (match(II->getArgOperand(1), m_One()))
6309 MaxRet--;
6310
6311 // Make sure the destination is wide enough to hold the largest output of
6312 // the intrinsic.
6313 if (llvm::Log2_32(MaxRet) + 1 <= Op0->getType()->getScalarSizeInBits())
6314 if (Instruction *I =
6315 foldICmpIntrinsicWithConstant(ICmp, II, C->zext(SrcBits)))
6316 return I;
6317 }
6318 }
6319
6320 return nullptr;
6321}
6322
6323 Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
6324 assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
6325 auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
6326 Value *X;
6327 if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
6328 return nullptr;
6329
6330 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6331 bool IsSignedCmp = ICmp.isSigned();
6332
6333 // icmp Pred (ext X), (ext Y)
6334 Value *Y;
6335 if (match(ICmp.getOperand(1), m_ZExtOrSExt(m_Value(Y)))) {
6336 bool IsZext0 = isa<ZExtInst>(ICmp.getOperand(0));
6337 bool IsZext1 = isa<ZExtInst>(ICmp.getOperand(1));
6338
6339 if (IsZext0 != IsZext1) {
6340 // If X and Y are both i1:
6341 // (icmp eq/ne (zext X) (sext Y))
6342 // eq -> (icmp eq (or X, Y), 0)
6343 // ne -> (icmp ne (or X, Y), 0)
6344 if (ICmp.isEquality() && X->getType()->isIntOrIntVectorTy(1) &&
6345 Y->getType()->isIntOrIntVectorTy(1))
6346 return new ICmpInst(ICmp.getPredicate(), Builder.CreateOr(X, Y),
6347 Constant::getNullValue(X->getType()));
6348
6349 // If we have mismatched casts and zext has the nneg flag, we can
6350 // treat the "zext nneg" as "sext". Otherwise, we cannot fold, so give up.
6351
6352 auto *NonNegInst0 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(0));
6353 auto *NonNegInst1 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(1));
6354
6355 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6356 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6357
6358 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6359 IsSignedExt = true;
6360 else
6361 return nullptr;
6362 }
6363
6364 // Not an extension from the same type?
6365 Type *XTy = X->getType(), *YTy = Y->getType();
6366 if (XTy != YTy) {
6367 // One of the casts must have one use because we are creating a new cast.
6368 if (!ICmp.getOperand(0)->hasOneUse() && !ICmp.getOperand(1)->hasOneUse())
6369 return nullptr;
6370 // Extend the narrower operand to the type of the wider operand.
6371 CastInst::CastOps CastOpcode =
6372 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6373 if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
6374 X = Builder.CreateCast(CastOpcode, X, YTy);
6375 else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
6376 Y = Builder.CreateCast(CastOpcode, Y, XTy);
6377 else
6378 return nullptr;
6379 }
6380
6381 // (zext X) == (zext Y) --> X == Y
6382 // (sext X) == (sext Y) --> X == Y
6383 if (ICmp.isEquality())
6384 return new ICmpInst(ICmp.getPredicate(), X, Y);
6385
6386 // A signed comparison of sign extended values simplifies into a
6387 // signed comparison.
6388 if (IsSignedCmp && IsSignedExt)
6389 return new ICmpInst(ICmp.getPredicate(), X, Y);
6390
6391 // The other three cases all fold into an unsigned comparison.
6392 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
6393 }
6394
6395 // Below here, we are only folding a compare with constant.
6396 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
6397 if (!C)
6398 return nullptr;
6399
6400 // If a lossless truncate is possible...
6401 Type *SrcTy = CastOp0->getSrcTy();
6402 Constant *Res = getLosslessInvCast(C, SrcTy, CastOp0->getOpcode(), DL);
6403 if (Res) {
6404 if (ICmp.isEquality())
6405 return new ICmpInst(ICmp.getPredicate(), X, Res);
6406
6407 // A signed comparison of sign extended values simplifies into a
6408 // signed comparison.
6409 if (IsSignedExt && IsSignedCmp)
6410 return new ICmpInst(ICmp.getPredicate(), X, Res);
6411
6412 // The other three cases all fold into an unsigned comparison.
6413 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res);
6414 }
6415
6416 // The re-extended constant changed, partly changed (in the case of a vector),
6417 // or could not be determined to be equal (in the case of a constant
6418 // expression), so the constant cannot be represented in the shorter type.
6419 // All the cases that fold to true or false will have already been handled
6420 // by simplifyICmpInst, so only deal with the tricky case.
6421 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
6422 return nullptr;
6423
6424 // Is source op positive?
6425 // icmp ult (sext X), C --> icmp sgt X, -1
6426 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
6427 return new ICmpInst(ICmpInst::ICMP_SGT, X,
6428 Constant::getAllOnesValue(X->getType()));
6429 // Is source op negative?
6430 // icmp ugt (sext X), C --> icmp slt X, 0
6431 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
6432 return new ICmpInst(ICmpInst::ICMP_SLT, X, Constant::getNullValue(X->getType()));
6433}
6434
6435/// Handle icmp (cast x), (cast or constant).
6436 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
6437 // If any operand of ICmp is an inttoptr roundtrip cast, remove it, as
6438 // icmp compares only the pointer's value.
6439 // icmp (inttoptr (ptrtoint p1)), p2 --> icmp p1, p2.
6440 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(0));
6441 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(1));
6442 if (SimplifiedOp0 || SimplifiedOp1)
6443 return new ICmpInst(ICmp.getPredicate(),
6444 SimplifiedOp0 ? SimplifiedOp0 : ICmp.getOperand(0),
6445 SimplifiedOp1 ? SimplifiedOp1 : ICmp.getOperand(1));
6446
6447 auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
6448 if (!CastOp0)
6449 return nullptr;
6450 if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
6451 return nullptr;
6452
6453 Value *Op0Src = CastOp0->getOperand(0);
6454 Type *SrcTy = CastOp0->getSrcTy();
6455 Type *DestTy = CastOp0->getDestTy();
6456
6457 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
6458 // integer type is the same size as the pointer type.
6459 auto CompatibleSizes = [&](Type *PtrTy, Type *IntTy) {
6460 if (isa<VectorType>(PtrTy)) {
6461 PtrTy = cast<VectorType>(PtrTy)->getElementType();
6462 IntTy = cast<VectorType>(IntTy)->getElementType();
6463 }
6464 return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
6465 };
6466 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6467 CompatibleSizes(SrcTy, DestTy)) {
6468 Value *NewOp1 = nullptr;
6469 if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
6470 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6471 if (PtrSrc->getType() == Op0Src->getType())
6472 NewOp1 = PtrToIntOp1->getOperand(0);
6473 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6474 NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
6475 }
6476
6477 if (NewOp1)
6478 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6479 }
6480
6481 // Do the same in the other direction for icmp (inttoptr x), (inttoptr/c).
6482 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6483 CompatibleSizes(DestTy, SrcTy)) {
6484 Value *NewOp1 = nullptr;
6485 if (auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.getOperand(1))) {
6486 Value *IntSrc = IntToPtrOp1->getOperand(0);
6487 if (IntSrc->getType() == Op0Src->getType())
6488 NewOp1 = IntToPtrOp1->getOperand(0);
6489 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6490 NewOp1 = ConstantFoldConstant(ConstantExpr::getPtrToInt(RHSC, SrcTy), DL);
6491 }
6492
6493 if (NewOp1)
6494 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6495 }
6496
6497 if (Instruction *R = foldICmpWithTrunc(ICmp))
6498 return R;
6499
6500 return foldICmpWithZextOrSext(ICmp);
6501}
6502
6503 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS,
6504 bool IsSigned) {
6505 switch (BinaryOp) {
6506 default:
6507 llvm_unreachable("Unsupported binary op");
6508 case Instruction::Add:
6509 case Instruction::Sub:
6510 return match(RHS, m_Zero());
6511 case Instruction::Mul:
6512 return !(RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
6513 match(RHS, m_One());
6514 }
6515}
6516
6517 OverflowResult
6518 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
6519 bool IsSigned, Value *LHS, Value *RHS,
6520 Instruction *CxtI) const {
6521 switch (BinaryOp) {
6522 default:
6523 llvm_unreachable("Unsupported binary op");
6524 case Instruction::Add:
6525 if (IsSigned)
6526 return computeOverflowForSignedAdd(LHS, RHS, CxtI);
6527 else
6528 return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
6529 case Instruction::Sub:
6530 if (IsSigned)
6531 return computeOverflowForSignedSub(LHS, RHS, CxtI);
6532 else
6533 return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
6534 case Instruction::Mul:
6535 if (IsSigned)
6536 return computeOverflowForSignedMul(LHS, RHS, CxtI);
6537 else
6538 return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
6539 }
6540}
6541
6542bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
6543 bool IsSigned, Value *LHS,
6544 Value *RHS, Instruction &OrigI,
6545 Value *&Result,
6546 Constant *&Overflow) {
6547 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
6548 std::swap(LHS, RHS);
6549
6550 // If the overflow check was an add followed by a compare, the insertion point
6551 // may be pointing to the compare. We want to insert the new instructions
6552 // before the add in case there are uses of the add between the add and the
6553 // compare.
6554 Builder.SetInsertPoint(&OrigI);
6555
6556 Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
6557 if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
6558 OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
6559
6560 if (isNeutralValue(BinaryOp, RHS, IsSigned)) {
6561 Result = LHS;
6562 Overflow = ConstantInt::getFalse(OverflowTy);
6563 return true;
6564 }
6565
6566 switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
6567 case OverflowResult::MayOverflow:
6568 return false;
6569 case OverflowResult::AlwaysOverflowsLow:
6570 case OverflowResult::AlwaysOverflowsHigh:
6571 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6572 Result->takeName(&OrigI);
6573 Overflow = ConstantInt::getTrue(OverflowTy);
6574 return true;
6575 case OverflowResult::NeverOverflows:
6576 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6577 Result->takeName(&OrigI);
6578 Overflow = ConstantInt::getFalse(OverflowTy);
6579 if (auto *Inst = dyn_cast<Instruction>(Result)) {
6580 if (IsSigned)
6581 Inst->setHasNoSignedWrap();
6582 else
6583 Inst->setHasNoUnsignedWrap();
6584 }
6585 return true;
6586 }
6587
6588 llvm_unreachable("Unexpected overflow result");
6589}
6590
6591/// Recognize and process idiom involving test for multiplication
6592/// overflow.
6593///
6594/// The caller has matched a pattern of the form:
6595 /// I = cmp u (mul(zext A, zext B)), V
6596 /// The function checks if this is a test for overflow and, if so, replaces
6597 /// the multiplication with a call to the 'mul.with.overflow' intrinsic.
6598///
6599/// \param I Compare instruction.
6600 /// \param MulVal Result of the 'mul' instruction. It is one of the arguments of
6601/// the compare instruction. Must be of integer type.
6602/// \param OtherVal The other argument of compare instruction.
6603/// \returns Instruction which must replace the compare instruction, NULL if no
6604/// replacement required.
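/// Illustrative example (A and B are i32):
///   %za = zext i32 %a to i64
///   %zb = zext i32 %b to i64
///   %m = mul i64 %za, %zb
///   %c = icmp ugt i64 %m, 4294967295
/// becomes
///   %r = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
///   %c = extractvalue { i32, i1 } %r, 1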
6605 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
6606 const APInt *OtherVal,
6607 InstCombinerImpl &IC) {
6608 // Don't bother doing this transformation for pointers, and don't do it for
6609 // vectors.
6610 if (!isa<IntegerType>(MulVal->getType()))
6611 return nullptr;
6612
6613 auto *MulInstr = dyn_cast<Instruction>(MulVal);
6614 if (!MulInstr)
6615 return nullptr;
6616 assert(MulInstr->getOpcode() == Instruction::Mul);
6617
6618 auto *LHS = cast<ZExtInst>(MulInstr->getOperand(0)),
6619 *RHS = cast<ZExtInst>(MulInstr->getOperand(1));
6620 assert(LHS->getOpcode() == Instruction::ZExt);
6621 assert(RHS->getOpcode() == Instruction::ZExt);
6622 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
6623
6624 // Calculate type and width of the result produced by mul.with.overflow.
6625 Type *TyA = A->getType(), *TyB = B->getType();
6626 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
6627 WidthB = TyB->getPrimitiveSizeInBits();
6628 unsigned MulWidth;
6629 Type *MulType;
6630 if (WidthB > WidthA) {
6631 MulWidth = WidthB;
6632 MulType = TyB;
6633 } else {
6634 MulWidth = WidthA;
6635 MulType = TyA;
6636 }
6637
6638 // In order to replace the original mul with a narrower mul.with.overflow,
6639 // all uses must ignore upper bits of the product. The number of used low
6640 // bits must not be greater than the width of mul.with.overflow.
6641 if (MulVal->hasNUsesOrMore(2))
6642 for (User *U : MulVal->users()) {
6643 if (U == &I)
6644 continue;
6645 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6646 // Check if truncation ignores bits above MulWidth.
6647 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
6648 if (TruncWidth > MulWidth)
6649 return nullptr;
6650 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6651 // Check if AND ignores bits above MulWidth.
6652 if (BO->getOpcode() != Instruction::And)
6653 return nullptr;
6654 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6655 const APInt &CVal = CI->getValue();
6656 if (CVal.getBitWidth() - CVal.countl_zero() > MulWidth)
6657 return nullptr;
6658 } else {
6659 // In this case we could have the operand of the binary operation
6660 // being defined in another block, and performing the replacement
6661 // could break the dominance relation.
6662 return nullptr;
6663 }
6664 } else {
6665 // Other uses prohibit this transformation.
6666 return nullptr;
6667 }
6668 }
6669
6670 // Recognize patterns
6671 switch (I.getPredicate()) {
6672 case ICmpInst::ICMP_UGT: {
6673 // Recognize pattern:
6674 // mulval = mul(zext A, zext B)
6675 // cmp ugt mulval, max
6676 APInt MaxVal = APInt::getMaxValue(MulWidth);
6677 MaxVal = MaxVal.zext(OtherVal->getBitWidth());
6678 if (MaxVal.eq(*OtherVal))
6679 break; // Recognized
6680 return nullptr;
6681 }
6682
6683 case ICmpInst::ICMP_ULT: {
6684 // Recognize pattern:
6685 // mulval = mul(zext A, zext B)
6686 // cmp ult mulval, max + 1
6687 APInt MaxVal = APInt::getOneBitSet(OtherVal->getBitWidth(), MulWidth);
6688 if (MaxVal.eq(*OtherVal))
6689 break; // Recognized
6690 return nullptr;
6691 }
6692
6693 default:
6694 return nullptr;
6695 }
6696
6697 InstCombiner::BuilderTy &Builder = IC.Builder;
6698 Builder.SetInsertPoint(MulInstr);
6699
6700 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
6701 Value *MulA = A, *MulB = B;
6702 if (WidthA < MulWidth)
6703 MulA = Builder.CreateZExt(A, MulType);
6704 if (WidthB < MulWidth)
6705 MulB = Builder.CreateZExt(B, MulType);
6706 CallInst *Call =
6707 Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
6708 {MulA, MulB}, /*FMFSource=*/nullptr, "umul");
6709 IC.addToWorklist(MulInstr);
6710
6711 // If there are uses of mul result other than the comparison, we know that
6712 // they are truncation or binary AND. Change them to use result of
6713 // mul.with.overflow and adjust properly mask/size.
6714 if (MulVal->hasNUsesOrMore(2)) {
6715 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
6716 for (User *U : make_early_inc_range(MulVal->users())) {
6717 if (U == &I)
6718 continue;
6719 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6720 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6721 IC.replaceInstUsesWith(*TI, Mul);
6722 else
6723 TI->setOperand(0, Mul);
6724 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6725 assert(BO->getOpcode() == Instruction::And);
6726 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
6727 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
6728 APInt ShortMask = CI->getValue().trunc(MulWidth);
6729 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
6730 Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
6731 IC.replaceInstUsesWith(*BO, Zext);
6732 } else {
6733 llvm_unreachable("Unexpected Binary operation");
6734 }
6735 IC.addToWorklist(cast<Instruction>(U));
6736 }
6737 }
6738
6739 // The original icmp gets replaced with the overflow value, maybe inverted
6740 // depending on predicate.
6741 if (I.getPredicate() == ICmpInst::ICMP_ULT) {
6742 Value *Res = Builder.CreateExtractValue(Call, 1);
6743 return BinaryOperator::CreateNot(Res);
6744 }
6745
6746 return ExtractValueInst::Create(Call, 1);
6747}
6748
6749/// When performing a comparison against a constant, it is possible that not all
6750/// the bits in the LHS are demanded. This helper method computes the mask that
6751/// IS demanded.
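/// Illustrative example: for "icmp ugt i8 %x, 7" only bits 3..7 of %x are
/// demanded; the trailing ones of the constant 7 cannot change the result,
/// because any value greater than 7 already differs in a higher bit.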
6752 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
6753 const APInt *RHS;
6754 if (!match(I.getOperand(1), m_APInt(RHS)))
6755 return APInt::getAllOnes(BitWidth);
6756
6757 // If this is a normal comparison, it demands all bits. If it is a sign bit
6758 // comparison, it only demands the sign bit.
6759 bool UnusedBit;
6760 if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
6761 return APInt::getSignMask(BitWidth);
6762
6763 switch (I.getPredicate()) {
6764 // For a UGT comparison, we don't care about any bits that
6765 // correspond to the trailing ones of the comparand. The value of these
6766 // bits doesn't impact the outcome of the comparison, because any value
6767 // greater than the RHS must differ in a bit higher than these due to carry.
6768 case ICmpInst::ICMP_UGT:
6769 return APInt::getBitsSetFrom(BitWidth, RHS->countr_one());
6770
6771 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
6772 // Any value less than the RHS must differ in a higher bit because of carries.
6773 case ICmpInst::ICMP_ULT:
6774 return APInt::getBitsSetFrom(BitWidth, RHS->countr_zero());
6775
6776 default:
6777 return APInt::getAllOnes(BitWidth);
6778 }
6779}
6780
6781/// Check that one use is in the same block as the definition and all
6782/// other uses are in blocks dominated by a given block.
6783///
6784/// \param DI Definition
6785/// \param UI Use
6786/// \param DB Block that must dominate all uses of \p DI outside
6787/// the parent block
6788/// \return true when \p UI is the only use of \p DI in the parent block
6789/// and all other uses of \p DI are in blocks dominated by \p DB.
6790///
6791 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
6792 const Instruction *UI,
6793 const BasicBlock *DB) const {
6794 assert(DI && UI && "Instruction not defined\n");
6795 // Ignore incomplete definitions.
6796 if (!DI->getParent())
6797 return false;
6798 // DI and UI must be in the same block.
6799 if (DI->getParent() != UI->getParent())
6800 return false;
6801 // Protect from self-referencing blocks.
6802 if (DI->getParent() == DB)
6803 return false;
6804 for (const User *U : DI->users()) {
6805 auto *Usr = cast<Instruction>(U);
6806 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
6807 return false;
6808 }
6809 return true;
6810}
6811
6812/// Return true when the instruction sequence within a block is select-cmp-br.
6813 static bool isChainSelectCmpBranch(const SelectInst *SI) {
6814 const BasicBlock *BB = SI->getParent();
6815 if (!BB)
6816 return false;
6817 const BranchInst *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
6818 if (!BI || BI->getNumSuccessors() != 2)
6819 return false;
6820 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
6821 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
6822 return false;
6823 return true;
6824}
6825
6826/// True when a select result is replaced by one of its operands
6827/// in select-icmp sequence. This will eventually result in the elimination
6828/// of the select.
6829///
6830/// \param SI Select instruction
6831/// \param Icmp Compare instruction
6832/// \param SIOpd Operand that replaces the select
6833///
6834/// Notes:
6835/// - The replacement is global and requires dominator information
6836/// - The caller is responsible for the actual replacement
6837///
6838/// Example:
6839///
6840/// entry:
6841/// %4 = select i1 %3, %C* %0, %C* null
6842/// %5 = icmp eq %C* %4, null
6843/// br i1 %5, label %9, label %7
6844/// ...
6845/// ; <label>:7 ; preds = %entry
6846/// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
6847/// ...
6848///
6849/// can be transformed to
6850///
6851/// %5 = icmp eq %C* %0, null
6852/// %6 = select i1 %3, i1 %5, i1 true
6853/// br i1 %6, label %9, label %7
6854/// ...
6855/// ; <label>:7 ; preds = %entry
6856/// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
6857///
6858 /// Similar when the first operand of the select is a constant and/or
6859 /// the compare is for not equal rather than equal.
6860///
6861/// NOTE: The function is only called when the select and compare constants
6862 /// are equal, so the optimization can work only for EQ predicates. This is not a
6863/// major restriction since a NE compare should be 'normalized' to an equal
6864/// compare, which usually happens in the combiner and test case
6865/// select-cmp-br.ll checks for it.
6866 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
6867 const ICmpInst *Icmp,
6868 const unsigned SIOpd) {
6869 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
6870 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
6871 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
6872 // The check for the single predecessor is not the best that can be
6873 // done. But it protects efficiently against cases like when SI's
6874 // home block has two successors, Succ and Succ1, and Succ1 is a
6875 // predecessor of Succ. Then SI can't be replaced by SIOpd because the use
6876 // that gets replaced can be reached on either path. So the uniqueness check
6877 // guarantees that the path all uses of SI (outside SI's parent) are on
6878 // is disjoint from all other paths out of SI. But that information
6879 // is more expensive to compute, and the trade-off here is in favor
6880 // of compile-time. It should also be noted that we check for a single
6881 // predecessor and not just uniqueness. This handles the situation when
6882 // Succ and Succ1 point to the same basic block.
6883 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
6884 NumSel++;
6885 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
6886 return true;
6887 }
6888 }
6889 return false;
6890}
6891
6892/// Try to fold the comparison based on range information we can get by checking
6893/// whether bits are known to be zero or one in the inputs.
6894 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
6895 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6896 Type *Ty = Op0->getType();
6897 ICmpInst::Predicate Pred = I.getPredicate();
6898
6899 // Get scalar or pointer size.
6900 unsigned BitWidth = Ty->isIntOrIntVectorTy()
6901 ? Ty->getScalarSizeInBits()
6902 : DL.getPointerTypeSizeInBits(Ty->getScalarType());
6903
6904 if (!BitWidth)
6905 return nullptr;
6906
6907 KnownBits Op0Known(BitWidth);
6908 KnownBits Op1Known(BitWidth);
6909
6910 {
6911 // Don't use dominating conditions when folding icmp using known bits. This
6912 // may convert signed into unsigned predicates in ways that other passes
6913 // (especially IndVarSimplify) may not be able to reliably undo.
6914 SimplifyQuery Q = SQ.getWithoutDomCondCache().getWithInstruction(&I);
6915 if (SimplifyDemandedBits(&I, 0, getDemandedBitsLHSMask(I, BitWidth),
6916 Op0Known, Q))
6917 return &I;
6918
6919 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, Q))
6920 return &I;
6921 }
6922
6923 if (!isa<Constant>(Op0) && Op0Known.isConstant())
6924 return new ICmpInst(
6925 Pred, ConstantExpr::getIntegerValue(Ty, Op0Known.getConstant()), Op1);
6926 if (!isa<Constant>(Op1) && Op1Known.isConstant())
6927 return new ICmpInst(
6928 Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Known.getConstant()));
6929
6930 if (std::optional<bool> Res = ICmpInst::compare(Op0Known, Op1Known, Pred))
6931 return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *Res));
6932
6933 // Given the known and unknown bits, compute a range that the LHS could be
6934 // in. Compute the Min, Max and RHS values based on the known bits. For the
6935 // EQ and NE we use unsigned values.
6936 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6937 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6938 if (I.isSigned()) {
6939 Op0Min = Op0Known.getSignedMinValue();
6940 Op0Max = Op0Known.getSignedMaxValue();
6941 Op1Min = Op1Known.getSignedMinValue();
6942 Op1Max = Op1Known.getSignedMaxValue();
6943 } else {
6944 Op0Min = Op0Known.getMinValue();
6945 Op0Max = Op0Known.getMaxValue();
6946 Op1Min = Op1Known.getMinValue();
6947 Op1Max = Op1Known.getMaxValue();
6948 }
6949
6950 // Don't break up a clamp pattern -- (min(max X, Y), Z) -- by replacing a
6951 // min/max canonical compare with some other compare. That could lead to
6952 // conflict with select canonicalization and infinite looping.
6953 // FIXME: This constraint may go away if min/max intrinsics are canonical.
6954 auto isMinMaxCmp = [&](Instruction &Cmp) {
6955 if (!Cmp.hasOneUse())
6956 return false;
6957 Value *A, *B;
6958 SelectPatternFlavor SPF = matchSelectPattern(Cmp.user_back(), A, B).Flavor;
6959 if (!SelectPatternResult::isMinOrMax(SPF))
6960 return false;
6961 return match(Op0, m_MaxOrMin(m_Value(), m_Value())) ||
6962 match(Op1, m_MaxOrMin(m_Value(), m_Value()));
6963 };
6964 if (!isMinMaxCmp(I)) {
6965 switch (Pred) {
6966 default:
6967 break;
6968 case ICmpInst::ICMP_ULT: {
6969 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6970 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6971 const APInt *CmpC;
6972 if (match(Op1, m_APInt(CmpC))) {
6973 // A <u C -> A == C-1 if min(A)+1 == C
6974 if (*CmpC == Op0Min + 1)
6975 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6976 ConstantInt::get(Op1->getType(), *CmpC - 1));
6977 // X <u C --> X == 0, if the number of zero bits in the bottom of X
6978 // exceeds the log2 of C.
6979 if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
6980 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6981 Constant::getNullValue(Op1->getType()));
6982 }
6983 break;
6984 }
6985 case ICmpInst::ICMP_UGT: {
6986 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6987 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6988 const APInt *CmpC;
6989 if (match(Op1, m_APInt(CmpC))) {
6990 // A >u C -> A == C+1 if max(a)-1 == C
6991 if (*CmpC == Op0Max - 1)
6992 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6993 ConstantInt::get(Op1->getType(), *CmpC + 1));
6994 // X >u C --> X != 0, if the number of zero bits in the bottom of X
6995 // exceeds the log2 of C.
6996 if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
6997 return new ICmpInst(ICmpInst::ICMP_NE, Op0,
6998 Constant::getNullValue(Op1->getType()));
6999 }
7000 break;
7001 }
7002 case ICmpInst::ICMP_SLT: {
7003 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
7004 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
7005 const APInt *CmpC;
7006 if (match(Op1, m_APInt(CmpC))) {
7007 if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
7008 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
7009 ConstantInt::get(Op1->getType(), *CmpC - 1));
7010 }
7011 break;
7012 }
7013 case ICmpInst::ICMP_SGT: {
7014 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
7015 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
7016 const APInt *CmpC;
7017 if (match(Op1, m_APInt(CmpC))) {
7018 if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
7019 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
7020 ConstantInt::get(Op1->getType(), *CmpC + 1));
7021 }
7022 break;
7023 }
7024 }
7025 }
7026
7027 // Based on the range information we know about the LHS, see if we can
7028 // simplify this comparison. For example, (x&4) < 8 is always true.
7029 switch (Pred) {
7030 default:
7031 break;
7032 case ICmpInst::ICMP_EQ:
7033 case ICmpInst::ICMP_NE: {
7034 // If all bits are known zero except for one, then we know at most one bit
7035 // is set. If the comparison is against zero, then this is a check to see if
7036 // *that* bit is set.
7037 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
7038 if (Op1Known.isZero()) {
7039 // If the LHS is an AND with the same constant, look through it.
7040 Value *LHS = nullptr;
7041 const APInt *LHSC;
7042 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
7043 *LHSC != Op0KnownZeroInverted)
7044 LHS = Op0;
7045
7046 Value *X;
7047 const APInt *C1;
7048 if (match(LHS, m_Shl(m_Power2(C1), m_Value(X)))) {
7049 Type *XTy = X->getType();
7050 unsigned Log2C1 = C1->countr_zero();
7051 APInt C2 = Op0KnownZeroInverted;
7052 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
7053 if (C2Pow2.isPowerOf2()) {
7054 // iff (C1 is pow2) & ((C2 & ~(C1-1)) + C1) is pow2):
7055 // ((C1 << X) & C2) == 0 -> X >= (Log2(C2+C1) - Log2(C1))
7056 // ((C1 << X) & C2) != 0 -> X < (Log2(C2+C1) - Log2(C1))
7057 unsigned Log2C2 = C2Pow2.countr_zero();
7058 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7059 auto NewPred =
7060 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
7061 return new ICmpInst(NewPred, X, CmpC);
7062 }
7063 }
7064 }
7065
7066 // Op0 eq C_Pow2 -> Op0 ne 0 if Op0 is known to be C_Pow2 or zero.
7067 if (Op1Known.isConstant() && Op1Known.getConstant().isPowerOf2() &&
7068 (Op0Known & Op1Known) == Op0Known)
7069 return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
7070 ConstantInt::getNullValue(Op1->getType()));
7071 break;
7072 }
7073 case ICmpInst::ICMP_SGE:
7074 if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
7075 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7076 break;
7077 case ICmpInst::ICMP_SLE:
7078 if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
7079 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7080 break;
7081 case ICmpInst::ICMP_UGE:
7082 if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
7083 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7084 break;
7085 case ICmpInst::ICMP_ULE:
7086 if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
7087 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7088 break;
7089 }
7090
7091 // Turn a signed comparison into an unsigned one if both operands are known to
7092 // have the same sign. Set samesign if possible (except for equality
7093 // predicates).
7094 if ((I.isSigned() || (I.isUnsigned() && !I.hasSameSign())) &&
7095 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
7096 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) {
7097 I.setPredicate(I.getUnsignedPredicate());
7098 I.setSameSign();
7099 return &I;
7100 }
7101
7102 return nullptr;
7103}
7104
7105/// If one operand of an icmp is effectively a bool (value range of {0,1}),
7106/// then try to reduce patterns based on that limit.
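/// Illustrative example: "icmp ult i8 %x, (zext i1 %y)" can only be true
/// when %x == 0 and %y is true, so it reduces to "(%x == 0) & %y".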
7107 Instruction *InstCombinerImpl::foldICmpUsingBoolRange(ICmpInst &I) {
7108 Value *X, *Y;
7109 CmpPredicate Pred;
7110
7111 // X must be 0 and bool must be true for "ULT":
7112 // X <u (zext i1 Y) --> (X == 0) & Y
7113 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_ZExt(m_Value(Y))))) &&
7114 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULT)
7115 return BinaryOperator::CreateAnd(Builder.CreateIsNull(X), Y);
7116
7117 // X must be 0 or bool must be true for "ULE":
7118 // X <=u (sext i1 Y) --> (X == 0) | Y
7119 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_SExt(m_Value(Y))))) &&
7120 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULE)
7121 return BinaryOperator::CreateOr(Builder.CreateIsNull(X), Y);
7122
7123 // icmp eq/ne X, (zext/sext (icmp eq/ne X, C))
7124 CmpPredicate Pred1, Pred2;
7125 const APInt *C;
7126 Instruction *ExtI;
7127 if (match(&I, m_c_ICmp(Pred1, m_Value(X),
7128 m_CombineAnd(m_Instruction(ExtI),
7129 m_ZExtOrSExt(m_ICmp(Pred2, m_Deferred(X),
7130 m_APInt(C)))))) &&
7131 ICmpInst::isEquality(Pred1) && ICmpInst::isEquality(Pred2)) {
7132 bool IsSExt = ExtI->getOpcode() == Instruction::SExt;
7133 bool HasOneUse = ExtI->hasOneUse() && ExtI->getOperand(0)->hasOneUse();
7134 auto CreateRangeCheck = [&] {
7135 Value *CmpV1 =
7136 Builder.CreateICmp(Pred1, X, Constant::getNullValue(X->getType()));
7137 Value *CmpV2 = Builder.CreateICmp(
7138 Pred1, X, ConstantInt::getSigned(X->getType(), IsSExt ? -1 : 1));
7139 return Builder.CreateBinOp(
7140 Pred1 == ICmpInst::ICMP_EQ ? Instruction::Or : Instruction::And,
7141 CmpV1, CmpV2);
7142 };
7143 if (C->isZero()) {
7144 if (Pred2 == ICmpInst::ICMP_EQ) {
7145 // icmp eq X, (zext/sext (icmp eq X, 0)) --> false
7146 // icmp ne X, (zext/sext (icmp eq X, 0)) --> true
7147 return replaceInstUsesWith(
7148 I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7149 } else if (!IsSExt || HasOneUse) {
7150 // icmp eq X, (zext (icmp ne X, 0)) --> X == 0 || X == 1
7151 // icmp ne X, (zext (icmp ne X, 0)) --> X != 0 && X != 1
7152 // icmp eq X, (sext (icmp ne X, 0)) --> X == 0 || X == -1
7153 // icmp ne X, (sext (icmp ne X, 0)) --> X != 0 && X != -1
7154 return CreateRangeCheck();
7155 }
7156 } else if (IsSExt ? C->isAllOnes() : C->isOne()) {
7157 if (Pred2 == ICmpInst::ICMP_NE) {
7158 // icmp eq X, (zext (icmp ne X, 1)) --> false
7159 // icmp ne X, (zext (icmp ne X, 1)) --> true
7160 // icmp eq X, (sext (icmp ne X, -1)) --> false
7161 // icmp ne X, (sext (icmp ne X, -1)) --> true
7162 return replaceInstUsesWith(
7163 I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7164 } else if (!IsSExt || HasOneUse) {
7165 // icmp eq X, (zext (icmp eq X, 1)) --> X == 0 || X == 1
7166 // icmp ne X, (zext (icmp eq X, 1)) --> X != 0 && X != 1
7167 // icmp eq X, (sext (icmp eq X, -1)) --> X == 0 || X == -1
7168 // icmp ne X, (sext (icmp eq X, -1)) --> X != 0 && X != -1
7169 return CreateRangeCheck();
7170 }
7171 } else {
7172 // when C != 0 && C != 1:
7173 // icmp eq X, (zext (icmp eq X, C)) --> icmp eq X, 0
7174 // icmp eq X, (zext (icmp ne X, C)) --> icmp eq X, 1
7175 // icmp ne X, (zext (icmp eq X, C)) --> icmp ne X, 0
7176 // icmp ne X, (zext (icmp ne X, C)) --> icmp ne X, 1
7177 // when C != 0 && C != -1:
7178 // icmp eq X, (sext (icmp eq X, C)) --> icmp eq X, 0
7179 // icmp eq X, (sext (icmp ne X, C)) --> icmp eq X, -1
7180 // icmp ne X, (sext (icmp eq X, C)) --> icmp ne X, 0
7181 // icmp ne X, (sext (icmp ne X, C)) --> icmp ne X, -1
7182 return ICmpInst::Create(
7183 Instruction::ICmp, Pred1, X,
7184 ConstantInt::getSigned(X->getType(), Pred2 == ICmpInst::ICMP_NE
7185 ? (IsSExt ? -1 : 1)
7186 : 0));
7187 }
7188 }
7189
7190 return nullptr;
7191}
7192
7193/// If we have an icmp le or icmp ge instruction with a constant operand, turn
7194/// it into the appropriate icmp lt or icmp gt instruction. This transform
7195/// allows them to be folded in visitICmpInst.
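/// Illustrative example: "icmp sle i32 %x, 5" --> "icmp slt i32 %x, 6".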
7196 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
7197 ICmpInst::Predicate Pred = I.getPredicate();
7198 if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
7199 InstCombiner::isCanonicalPredicate(Pred))
7200 return nullptr;
7201
7202 Value *Op0 = I.getOperand(0);
7203 Value *Op1 = I.getOperand(1);
7204 auto *Op1C = dyn_cast<Constant>(Op1);
7205 if (!Op1C)
7206 return nullptr;
7207
7208 auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
7209 if (!FlippedStrictness)
7210 return nullptr;
7211
7212 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7213}
7214
7215/// If we have a comparison with a non-canonical predicate, if we can update
7216/// all the users, invert the predicate and adjust all the users.
7217 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
7218 // Is the predicate already canonical?
7219 CmpInst::Predicate Pred = I.getPredicate();
7220 if (InstCombiner::isCanonicalPredicate(Pred))
7221 return nullptr;
7222
7223 // Can all users be adjusted to predicate inversion?
7224 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
7225 return nullptr;
7226
7227 // Ok, we can canonicalize comparison!
7228 // Let's first invert the comparison's predicate.
7229 I.setPredicate(CmpInst::getInversePredicate(Pred));
7230 I.setName(I.getName() + ".not");
7231
7232 // And, adapt users.
7233 freelyInvertAllUsersOf(&I);
7234
7235 return &I;
7236}
7237
7238/// Integer compare with boolean values can always be turned into bitwise ops.
7239 static Instruction *canonicalizeICmpBool(ICmpInst &I,
7240 InstCombiner::BuilderTy &Builder) {
7241 Value *A = I.getOperand(0), *B = I.getOperand(1);
7242 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
7243
7244 // A boolean compared to true/false can be simplified to Op0/true/false in
7245 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
7246 // Cases not handled by InstSimplify are always 'not' of Op0.
7247 if (match(B, m_Zero())) {
7248 switch (I.getPredicate()) {
7249 case CmpInst::ICMP_EQ: // A == 0 -> !A
7250 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
7251 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
7252 return BinaryOperator::CreateNot(A);
7253 default:
7254 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7255 }
7256 } else if (match(B, m_One())) {
7257 switch (I.getPredicate()) {
7258 case CmpInst::ICMP_NE: // A != 1 -> !A
7259 case CmpInst::ICMP_ULT: // A <u 1 -> !A
7260 case CmpInst::ICMP_SGT: // A >s -1 -> !A
7261 return BinaryOperator::CreateNot(A);
7262 default:
7263 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7264 }
7265 }
7266
7267 switch (I.getPredicate()) {
7268 default:
7269 llvm_unreachable("Invalid icmp instruction!");
7270 case ICmpInst::ICMP_EQ:
7271 // icmp eq i1 A, B -> ~(A ^ B)
7272 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
7273
7274 case ICmpInst::ICMP_NE:
7275 // icmp ne i1 A, B -> A ^ B
7276 return BinaryOperator::CreateXor(A, B);
7277
7278 case ICmpInst::ICMP_UGT:
7279 // icmp ugt -> icmp ult
7280 std::swap(A, B);
7281 [[fallthrough]];
7282 case ICmpInst::ICMP_ULT:
7283 // icmp ult i1 A, B -> ~A & B
7284 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
7285
7286 case ICmpInst::ICMP_SGT:
7287 // icmp sgt -> icmp slt
7288 std::swap(A, B);
7289 [[fallthrough]];
7290 case ICmpInst::ICMP_SLT:
7291 // icmp slt i1 A, B -> A & ~B
7292 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
7293
7294 case ICmpInst::ICMP_UGE:
7295 // icmp uge -> icmp ule
7296 std::swap(A, B);
7297 [[fallthrough]];
7298 case ICmpInst::ICMP_ULE:
7299 // icmp ule i1 A, B -> ~A | B
7300 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
7301
7302 case ICmpInst::ICMP_SGE:
7303 // icmp sge -> icmp sle
7304 std::swap(A, B);
7305 [[fallthrough]];
7306 case ICmpInst::ICMP_SLE:
7307 // icmp sle i1 A, B -> A | ~B
7308 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
7309 }
7310}
7311
7312// Transform pattern like:
7313// (1 << Y) u<= X or ~(-1 << Y) u< X or ((1 << Y)+(-1)) u< X
7314// (1 << Y) u> X or ~(-1 << Y) u>= X or ((1 << Y)+(-1)) u>= X
7315// Into:
7316// (X l>> Y) != 0
7317// (X l>> Y) == 0
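// Example (illustrative): (1 << %y) u<= %x --> (%x l>> %y) != 0, since the
// shifted value tests whether %x has any bit set at or above position %y.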
7318 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
7319 InstCombiner::BuilderTy &Builder) {
7320 CmpPredicate Pred, NewPred;
7321 Value *X, *Y;
7322 if (match(&Cmp,
7323 m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
7324 switch (Pred) {
7325 case ICmpInst::ICMP_ULE:
7326 NewPred = ICmpInst::ICMP_NE;
7327 break;
7328 case ICmpInst::ICMP_UGT:
7329 NewPred = ICmpInst::ICMP_EQ;
7330 break;
7331 default:
7332 return nullptr;
7333 }
7334 } else if (match(&Cmp, m_c_ICmp(Pred,
7335 m_OneUse(m_CombineOr(
7336 m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
7337 m_Add(m_Shl(m_One(), m_Value(Y)),
7338 m_AllOnes()))),
7339 m_Value(X)))) {
7340 // The variant with 'add' is not canonical (the variant with 'not' is);
7341 // we only get it here because it has extra uses and cannot be canonicalized.
7342
7343 switch (Pred) {
7344 case ICmpInst::ICMP_ULT:
7345 NewPred = ICmpInst::ICMP_NE;
7346 break;
7347 case ICmpInst::ICMP_UGE:
7348 NewPred = ICmpInst::ICMP_EQ;
7349 break;
7350 default:
7351 return nullptr;
7352 }
7353 } else
7354 return nullptr;
7355
7356 Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
7357 Constant *Zero = Constant::getNullValue(NewX->getType());
7358 return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
7359}
7360
7361 static Instruction *foldVectorCmp(CmpInst &Cmp,
7362 InstCombiner::BuilderTy &Builder) {
7363 const CmpInst::Predicate Pred = Cmp.getPredicate();
7364 Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
7365 Value *V1, *V2;
7366
7367 auto createCmpReverse = [&](CmpInst::Predicate Pred, Value *X, Value *Y) {
7368 Value *V = Builder.CreateCmp(Pred, X, Y, Cmp.getName());
7369 if (auto *I = dyn_cast<Instruction>(V))
7370 I->copyIRFlags(&Cmp);
7371 Module *M = Cmp.getModule();
7372 Function *F = Intrinsic::getOrInsertDeclaration(
7373 M, Intrinsic::vector_reverse, V->getType());
7374 return CallInst::Create(F, V);
7375 };
7376
7377 if (match(LHS, m_VecReverse(m_Value(V1)))) {
7378 // cmp Pred, rev(V1), rev(V2) --> rev(cmp Pred, V1, V2)
7379 if (match(RHS, m_VecReverse(m_Value(V2))) &&
7380 (LHS->hasOneUse() || RHS->hasOneUse()))
7381 return createCmpReverse(Pred, V1, V2);
7382
7383 // cmp Pred, rev(V1), RHSSplat --> rev(cmp Pred, V1, RHSSplat)
7384 if (LHS->hasOneUse() && isSplatValue(RHS))
7385 return createCmpReverse(Pred, V1, RHS);
7386 }
7387 // cmp Pred, LHSSplat, rev(V2) --> rev(cmp Pred, LHSSplat, V2)
7388 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
7389 return createCmpReverse(Pred, LHS, V2);
7390
7391 ArrayRef<int> M;
7392 if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
7393 return nullptr;
7394
7395 // If both arguments of the cmp are shuffles that use the same mask and
7396 // shuffle within a single vector, move the shuffle after the cmp:
7397 // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
7398 Type *V1Ty = V1->getType();
7399 if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
7400 V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
7401 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
7402 return new ShuffleVectorInst(NewCmp, M);
7403 }
7404
7405 // Try to canonicalize compare with splatted operand and splat constant.
7406 // TODO: We could generalize this for more than splats. See/use the code in
7407 // InstCombiner::foldVectorBinop().
7408 Constant *C;
7409 if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
7410 return nullptr;
7411
7412 // Length-changing splats are ok, so adjust the constants as needed:
7413 // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
7414 Constant *ScalarC = C->getSplatValue(/* AllowPoison */ true);
7415 int MaskSplatIndex;
7416 if (ScalarC && match(M, m_SplatOrPoisonMask(MaskSplatIndex))) {
7417 // We allow poison in matching, but this transform removes it for safety.
7418 // Demanded elements analysis should be able to recover some/all of that.
7419 C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
7420 ScalarC);
7421 SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
7422 Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
7423 return new ShuffleVectorInst(NewCmp, NewM);
7424 }
7425
7426 return nullptr;
7427}
7428
7429// extract(uadd.with.overflow(A, B), 0) ult A
7430// -> extract(uadd.with.overflow(A, B), 1)
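// Example (illustrative):
//   %agg = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
//   %sum = extractvalue { i8, i1 } %agg, 0
//   %cmp = icmp ult i8 %sum, %a
// --> %cmp = extractvalue { i8, i1 } %agg, 1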
7431 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
7432 CmpInst::Predicate Pred = I.getPredicate();
7433 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7434
7435 Value *UAddOv;
7436 Value *A, *B;
7437 auto UAddOvResultPat = m_ExtractValue<0>(
7438 m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
7439 if (match(Op0, UAddOvResultPat) &&
7440 ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
7441 (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
7442 (match(A, m_One()) || match(B, m_One()))) ||
7443 (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
7444 (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
7445 // extract(uadd.with.overflow(A, B), 0) < A
7446 // extract(uadd.with.overflow(A, 1), 0) == 0
7447 // extract(uadd.with.overflow(A, -1), 0) != -1
7448 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
7449 else if (match(Op1, UAddOvResultPat) && Pred == ICmpInst::ICMP_UGT &&
7450 (Op0 == A || Op0 == B))
7451 // A > extract(uadd.with.overflow(A, B), 0)
7452 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
7453 else
7454 return nullptr;
7455
7456 return ExtractValueInst::Create(UAddOv, 1);
7457}
7458
7459 static Instruction *foldICmpInvariantGroup(ICmpInst &I) {
7460 if (!I.getOperand(0)->getType()->isPointerTy() ||
7461 NullPointerIsDefined(
7462 I.getParent()->getParent(),
7463 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7464 return nullptr;
7465 }
7466 Instruction *Op;
7467 if (match(I.getOperand(0), m_Instruction(Op)) &&
7468 match(I.getOperand(1), m_Zero()) &&
7469 Op->isLaunderOrStripInvariantGroup()) {
7470 return ICmpInst::Create(Instruction::ICmp, I.getPredicate(),
7471 Op->getOperand(0), I.getOperand(1));
7472 }
7473 return nullptr;
7474}
7475
7476/// This function folds patterns produced by lowering of reduce idioms, such as
7477 /// llvm.vector.reduce.and, which are lowered into instruction chains. This code
7478 /// attempts to generate a smaller number of scalar comparisons instead of
7479 /// vector comparisons when possible.
7480 static Instruction *foldReductionIdiom(ICmpInst &I,
7481 InstCombiner::BuilderTy &Builder,
7482 const DataLayout &DL) {
7483 if (I.getType()->isVectorTy())
7484 return nullptr;
7485 CmpPredicate OuterPred, InnerPred;
7486 Value *LHS, *RHS;
7487
7488 // Match lowering of @llvm.vector.reduce.and. Turn
7489 /// %vec_ne = icmp ne <8 x i8> %lhs, %rhs
7490 /// %scalar_ne = bitcast <8 x i1> %vec_ne to i8
7491 /// %res = icmp <pred> i8 %scalar_ne, 0
7492 ///
7493 /// into
7494 ///
7495 /// %lhs.scalar = bitcast <8 x i8> %lhs to i64
7496 /// %rhs.scalar = bitcast <8 x i8> %rhs to i64
7497 /// %res = icmp <pred> i64 %lhs.scalar, %rhs.scalar
7498 ///
7499 /// for <pred> in {ne, eq}.
7500 if (!match(&I, m_ICmp(OuterPred,
7501 m_OneUse(m_BitCast(m_OneUse(
7502 m_ICmp(InnerPred, m_Value(LHS), m_Value(RHS))))),
7503 m_Zero())))
7504 return nullptr;
7505 auto *LHSTy = dyn_cast<FixedVectorType>(LHS->getType());
7506 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7507 return nullptr;
7508 unsigned NumBits =
7509 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7510 // TODO: Relax this to "not wider than max legal integer type"?
7511 if (!DL.isLegalInteger(NumBits))
7512 return nullptr;
7513
7514 if (ICmpInst::isEquality(OuterPred) && InnerPred == ICmpInst::ICMP_NE) {
7515 auto *ScalarTy = Builder.getIntNTy(NumBits);
7516 LHS = Builder.CreateBitCast(LHS, ScalarTy, LHS->getName() + ".scalar");
7517 RHS = Builder.CreateBitCast(RHS, ScalarTy, RHS->getName() + ".scalar");
7518 return ICmpInst::Create(Instruction::ICmp, OuterPred, LHS, RHS,
7519 I.getName());
7520 }
7521
7522 return nullptr;
7523}
7524
7525// This helper will be called with icmp operands in both orders.
7526 Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred,
7527 Value *Op0, Value *Op1,
7528 ICmpInst &CxtI) {
7529 // Try to optimize 'icmp GEP, P' or 'icmp P, GEP'.
7530 if (auto *GEP = dyn_cast<GEPOperator>(Op0))
7531 if (Instruction *NI = foldGEPICmp(GEP, Op1, Pred, CxtI))
7532 return NI;
7533
7534 if (auto *SI = dyn_cast<SelectInst>(Op0))
7535 if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI))
7536 return NI;
7537
7538 if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0)) {
7539 if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred))
7540 return Res;
7541
7542 if (Instruction *Res = foldICmpWithClamp(CxtI, Op1, MinMax))
7543 return Res;
7544 }
7545
7546 {
7547 Value *X;
7548 const APInt *C;
7549 // icmp X+Cst, X
7550 if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
7551 return foldICmpAddOpConst(X, *C, Pred);
7552 }
7553
7554 // abs(X) >= X --> true
7555 // abs(X) u<= X --> true
7556 // abs(X) < X --> false
7557 // abs(X) u> X --> false
7558 // abs(X) u>= X --> IsIntMinPoison ? `X > -1`: `X u<= INTMIN`
7559 // abs(X) <= X --> IsIntMinPoison ? `X > -1`: `X u<= INTMIN`
7560 // abs(X) == X --> IsIntMinPoison ? `X > -1`: `X u<= INTMIN`
7561 // abs(X) u< X --> IsIntMinPoison ? `X < 0` : `X > INTMIN`
7562 // abs(X) > X --> IsIntMinPoison ? `X < 0` : `X > INTMIN`
7563 // abs(X) != X --> IsIntMinPoison ? `X < 0` : `X > INTMIN`
7564 {
7565 Value *X;
7566 Constant *C;
7567 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X), m_Constant(C))) &&
7568 match(Op1, m_Specific(X))) {
7569 Value *NullValue = Constant::getNullValue(X->getType());
7570 Value *AllOnesValue = Constant::getAllOnesValue(X->getType());
7571 const APInt SMin =
7572 APInt::getSignedMinValue(X->getType()->getScalarSizeInBits());
7573 bool IsIntMinPoison = C->isAllOnesValue();
7574 switch (Pred) {
7575 case CmpInst::ICMP_ULE:
7576 case CmpInst::ICMP_SGE:
7577 return replaceInstUsesWith(CxtI, ConstantInt::getTrue(CxtI.getType()));
7578 case CmpInst::ICMP_UGT:
7579 case CmpInst::ICMP_SLT:
7580 return replaceInstUsesWith(CxtI, ConstantInt::getFalse(CxtI.getType()));
7581 case CmpInst::ICMP_UGE:
7582 case CmpInst::ICMP_SLE:
7583 case CmpInst::ICMP_EQ: {
7584 return replaceInstUsesWith(
7585 CxtI, IsIntMinPoison
7586 ? Builder.CreateICmpSGT(X, AllOnesValue)
7587 : Builder.CreateICmpULT(
7588 X, ConstantInt::get(X->getType(), SMin + 1)));
7589 }
7590 case CmpInst::ICMP_ULT:
7591 case CmpInst::ICMP_SGT:
7592 case CmpInst::ICMP_NE: {
7593 return replaceInstUsesWith(
7594 CxtI, IsIntMinPoison
7595 ? Builder.CreateICmpSLT(X, NullValue)
7596 : Builder.CreateICmpUGT(
7597 X, ConstantInt::get(X->getType(), SMin)));
7598 }
7599 default:
7600 llvm_unreachable("Invalid predicate!");
7601 }
7602 }
7603 }
7604
7605 const SimplifyQuery Q = SQ.getWithInstruction(&CxtI);
7606 if (Value *V = foldICmpWithLowBitMaskedVal(Pred, Op0, Op1, Q, *this))
7607 return replaceInstUsesWith(CxtI, V);
7608
7609 // Folding (X / Y) pred X => X swap(pred) 0 for constant Y other than 0 or 1
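// Example (illustrative): icmp ult (udiv i8 %x, 3), %x --> icmp ugt i8 %x, 0.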
7610 auto CheckUGT1 = [](const APInt &Divisor) { return Divisor.ugt(1); };
7611 {
7612 if (match(Op0, m_UDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7613 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7614 Constant::getNullValue(Op1->getType()));
7615 }
7616
7617 if (!ICmpInst::isUnsigned(Pred) &&
7618 match(Op0, m_SDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7619 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7620 Constant::getNullValue(Op1->getType()));
7621 }
7622 }
7623
7624 // Another case of this fold is (X >> Y) pred X => X swap(pred) 0 if Y != 0
7625 auto CheckNE0 = [](const APInt &Shift) { return !Shift.isZero(); };
7626 {
7627 if (match(Op0, m_LShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7628 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7629 Constant::getNullValue(Op1->getType()));
7630 }
7631
7632 if ((Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SGE) &&
7633 match(Op0, m_AShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7634 return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7635 Constant::getNullValue(Op1->getType()));
7636 }
7637 }
7638
7639 return nullptr;
7640}
7641
7642 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
7643 bool Changed = false;
7644 const SimplifyQuery Q = SQ.getWithInstruction(&I);
7645 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7646 unsigned Op0Cplxity = getComplexity(Op0);
7647 unsigned Op1Cplxity = getComplexity(Op1);
7648
7649 /// Orders the operands of the compare so that they are listed from most
7650 /// complex to least complex. This puts binary operators before unary
7651 /// operators, and unary operators before constants.
7652 if (Op0Cplxity < Op1Cplxity) {
7653 I.swapOperands();
7654 std::swap(Op0, Op1);
7655 Changed = true;
7656 }
7657
7658 if (Value *V = simplifyICmpInst(I.getCmpPredicate(), Op0, Op1, Q))
7659 return replaceInstUsesWith(I, V);
7660
7661 // Comparing -val or val with non-zero is the same as just comparing val
7662 // i.e., abs(val) != 0 -> val != 0
7663 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
7664 Value *Cond, *SelectTrue, *SelectFalse;
7665 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
7666 m_Value(SelectFalse)))) {
7667 if (Value *V = dyn_castNegVal(SelectTrue)) {
7668 if (V == SelectFalse)
7669 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7670 } else if (Value *V = dyn_castNegVal(SelectFalse)) {
7671 if (V == SelectTrue)
7672 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7673 }
7674 }
7675 }
7676
7677 if (Instruction *Res = canonicalizeCmpWithConstant(I))
7678 return Res;
7679
7680 if (Op0->getType()->isIntOrIntVectorTy(1))
7681 if (Instruction *Res = canonicalizeICmpBool(I, Builder))
7682 return Res;
7683
7684 if (Instruction *Res = canonicalizeICmpPredicate(I))
7685 return Res;
7686
7687 if (Instruction *Res = foldICmpWithConstant(I))
7688 return Res;
7689
7690 if (Instruction *Res = foldICmpWithDominatingICmp(I))
7691 return Res;
7692
7693 if (Instruction *Res = foldICmpUsingBoolRange(I))
7694 return Res;
7695
7696 if (Instruction *Res = foldICmpUsingKnownBits(I))
7697 return Res;
7698
7699 if (Instruction *Res = foldICmpTruncWithTruncOrExt(I, Q))
7700 return Res;
7701
7703 return Res;
7704
7705 // Test if the ICmpInst instruction is used exclusively by a select as
7706 // part of a minimum or maximum operation. If so, refrain from doing
7707 // any other folding. This helps out other analyses which understand
7708 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
7709 // and CodeGen. And in this case, at least one of the comparison
7710 // operands has at least one user besides the compare (the select),
7711 // which would often largely negate the benefit of folding anyway.
7712 //
7713 // Do the same for the other patterns recognized by matchSelectPattern.
7714 if (I.hasOneUse())
7715 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
7716 Value *A, *B;
7717 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
7718 if (SPR.Flavor != SPF_UNKNOWN)
7719 return nullptr;
7720 }
7721
7722 // Do this after checking for min/max to prevent infinite looping.
7723 if (Instruction *Res = foldICmpWithZero(I))
7724 return Res;
7725
7726 // FIXME: We only do this after checking for min/max to prevent infinite
7727 // looping caused by a reverse canonicalization of these patterns for min/max.
7728 // FIXME: The organization of folds is a mess. These would naturally go into
7729 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
7730 // down here after the min/max restriction.
7731 ICmpInst::Predicate Pred = I.getPredicate();
7732 const APInt *C;
7733 if (match(Op1, m_APInt(C))) {
7734 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
7735 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
7736 Constant *Zero = Constant::getNullValue(Op0->getType());
7737 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
7738 }
7739
7740 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
7741 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
7742 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
7743 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
7744 }
7745 }
7746
7747 // The folds in here may rely on wrapping flags and special constants, so
7748 // they can break up min/max idioms in some cases but not seemingly similar
7749 // patterns.
7750 // FIXME: It may be possible to enhance select folding to make this
7751 // unnecessary. It may also be moot if we canonicalize to min/max
7752 // intrinsics.
7753 if (Instruction *Res = foldICmpBinOp(I, Q))
7754 return Res;
7755
7756 if (Instruction *Res = foldICmpInstWithConstant(I))
7757 return Res;
7758
7759 // Try to match comparison as a sign bit test. Intentionally do this after
7760 // foldICmpInstWithConstant() to potentially let other folds happen first.
7761 if (Instruction *New = foldSignBitTest(I))
7762 return New;
7763
7764 if (auto *PN = dyn_cast<PHINode>(Op0))
7765 if (Instruction *NV = foldOpIntoPhi(I, PN))
7766 return NV;
7767 if (auto *PN = dyn_cast<PHINode>(Op1))
7768 if (Instruction *NV = foldOpIntoPhi(I, PN))
7769 return NV;
7770
7771 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
7772 return Res;
7773
7774 if (Instruction *Res = foldICmpCommutative(I.getCmpPredicate(), Op0, Op1, I))
7775 return Res;
7776 if (Instruction *Res =
7777 foldICmpCommutative(I.getSwappedCmpPredicate(), Op1, Op0, I))
7778 return Res;
7779
7780 if (I.isCommutative()) {
7781 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
7782 replaceOperand(I, 0, Pair->first);
7783 replaceOperand(I, 1, Pair->second);
7784 return &I;
7785 }
7786 }
7787
7788 // In case of a comparison with two select instructions having the same
7789 // condition, check whether one of the resulting branches can be simplified.
7790 // If so, just compare the other branch and select the appropriate result.
7791 // For example:
7792 // %tmp1 = select i1 %cmp, i32 %y, i32 %x
7793 // %tmp2 = select i1 %cmp, i32 %z, i32 %x
7794 // %cmp2 = icmp slt i32 %tmp2, %tmp1
7795 // The icmp will result false for the false value of selects and the result
7796 // will depend upon the comparison of true values of selects if %cmp is
7797 // true. Thus, transform this into:
7798 // %cmp = icmp slt i32 %y, %z
7799 // %sel = select i1 %cond, i1 %cmp, i1 false
7800 // This handles similar cases to transform.
7801 {
7802 Value *Cond, *A, *B, *C, *D;
7803 if (match(Op0, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
7804 match(Op1, m_Select(m_Specific(Cond), m_Value(C), m_Value(D))) &&
7805 (Op0->hasOneUse() || Op1->hasOneUse())) {
7806 // Check whether comparison of TrueValues can be simplified
7807 if (Value *Res = simplifyICmpInst(Pred, A, C, SQ)) {
7808 Value *NewICMP = Builder.CreateICmp(Pred, B, D);
7809 return SelectInst::Create(Cond, Res, NewICMP);
7810 }
7811 // Check whether comparison of FalseValues can be simplified
7812 if (Value *Res = simplifyICmpInst(Pred, B, D, SQ)) {
7813 Value *NewICMP = Builder.CreateICmp(Pred, A, C);
7814 return SelectInst::Create(Cond, NewICMP, Res);
7815 }
7816 }
7817 }
7818
7819 // icmp slt (sub nsw x, y), (add nsw x, y) --> icmp sgt y, 0
7820 // icmp ult (sub nuw x, y), (add nuw x, y) --> icmp ugt y, 0
7821 // icmp eq (sub nsw/nuw x, y), (add nsw/nuw x, y) --> icmp eq y, 0
7822 {
7823 Value *A, *B;
7824 CmpPredicate CmpPred;
7825 if (match(&I, m_c_ICmp(CmpPred, m_Sub(m_Value(A), m_Value(B)),
7826 m_c_Add(m_Deferred(A), m_Deferred(B))))) {
7827 auto *I0 = cast<OverflowingBinaryOperator>(Op0);
7828 auto *I1 = cast<OverflowingBinaryOperator>(Op1);
7829 bool I0NUW = I0->hasNoUnsignedWrap();
7830 bool I1NUW = I1->hasNoUnsignedWrap();
7831 bool I0NSW = I0->hasNoSignedWrap();
7832 bool I1NSW = I1->hasNoSignedWrap();
7833 if ((ICmpInst::isUnsigned(Pred) && I0NUW && I1NUW) ||
7834 (ICmpInst::isSigned(Pred) && I0NSW && I1NSW) ||
7835 (ICmpInst::isEquality(Pred) &&
7836 ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7837 return new ICmpInst(CmpPredicate::getSwapped(CmpPred), B,
7838 ConstantInt::get(Op0->getType(), 0));
7839 }
7840 }
7841 }
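// A sketch of the first pattern on hypothetical i32 IR, with 'nsw' on both
// operations:
//   %s = sub nsw i32 %x, %y
//   %a = add nsw i32 %x, %y
//   %c = icmp slt i32 %s, %a
// folds to:
//   %c = icmp sgt i32 %y, 0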
7842
7843 // Try to optimize equality comparisons against alloca-based pointers.
7844 if (Op0->getType()->isPointerTy() && I.isEquality()) {
7845 assert(Op1->getType()->isPointerTy() &&
7846 "Comparing pointer with non-pointer?");
7847 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
7848 if (foldAllocaCmp(Alloca))
7849 return nullptr;
7850 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
7851 if (foldAllocaCmp(Alloca))
7852 return nullptr;
7853 }
7854
7855 if (Instruction *Res = foldICmpBitCast(I))
7856 return Res;
7857
7858 // TODO: Hoist this above the min/max bailout.
7859 if (Instruction *R = foldICmpWithHighBitMask(I, Builder))
7860 return R;
7861
7862 {
7863 Value *X, *Y;
7864 // Transform (X & ~Y) == 0 --> (X & Y) != 0
7865 // and (X & ~Y) != 0 --> (X & Y) == 0
7866 // if X is a power of 2.
7867 if (match(Op0, m_And(m_Value(X), m_Not(m_Value(Y)))) &&
7868 match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, &I) &&
7869 I.isEquality())
7870 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
7871 Op1);
7872
7873 // Op0 pred Op1 -> ~Op1 pred ~Op0, if this allows us to drop an instruction.
7874 if (Op0->getType()->isIntOrIntVectorTy()) {
7875 bool ConsumesOp0, ConsumesOp1;
7876 if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
7877 isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
7878 (ConsumesOp0 || ConsumesOp1)) {
7879 Value *InvOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
7880 Value *InvOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
7881 assert(InvOp0 && InvOp1 &&
7882 "Mismatch between isFreeToInvert and getFreelyInverted");
7883 return new ICmpInst(I.getSwappedPredicate(), InvOp0, InvOp1);
7884 }
7885 }
7886
7887 Instruction *AddI = nullptr;
7888 if (match(&I, m_UAddWithOverflow(m_Value(X), m_Value(Y),
7889 m_Instruction(AddI))) &&
7890 isa<IntegerType>(X->getType())) {
7891 Value *Result;
7892 Constant *Overflow;
7893 // m_UAddWithOverflow can match patterns that do not include an explicit
7894 // "add" instruction, so check the opcode of the matched op.
7895 if (AddI->getOpcode() == Instruction::Add &&
7896 OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, X, Y, *AddI,
7897 Result, Overflow)) {
7898 replaceInstUsesWith(*AddI, Result);
7899 eraseInstFromFunction(*AddI);
7900 return replaceInstUsesWith(I, Overflow);
7901 }
7902 }
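// For example (hypothetical i32 IR), the classic unsigned overflow check
//   %a = add i32 %x, %y
//   %c = icmp ult i32 %a, %x
// is matched here and can become
//   %uov = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %a   = extractvalue { i32, i1 } %uov, 0
//   %c   = extractvalue { i32, i1 } %uov, 1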
7903
7904 // (zext X) * (zext Y) --> llvm.umul.with.overflow.
7905 if (match(Op0, m_NUWMul(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y)))) &&
7906 match(Op1, m_APInt(C))) {
7907 if (Instruction *R = processUMulZExtIdiom(I, Op0, C, *this))
7908 return R;
7909 }
7910
7911 // Signbit test folds
7912 // Fold (X u>> BitWidth - 1 Pred ZExt(i1)) --> X s< 0 Pred i1
7913 // Fold (X s>> BitWidth - 1 Pred SExt(i1)) --> X s< 0 Pred i1
7914 Instruction *ExtI;
7915 if ((I.isUnsigned() || I.isEquality()) &&
7916 match(Op1,
7917 m_CombineAnd(m_Instruction(ExtI), m_ZExtOrSExt(m_Value(Y)))) &&
7918 Y->getType()->getScalarSizeInBits() == 1 &&
7919 (Op0->hasOneUse() || Op1->hasOneUse())) {
7920 unsigned OpWidth = Op0->getType()->getScalarSizeInBits();
7921 Instruction *ShiftI;
7922 if (match(Op0, m_CombineAnd(m_Instruction(ShiftI),
7923 m_Shr(m_Value(X), m_SpecificIntAllowPoison(
7924 OpWidth - 1))))) {
7925 unsigned ExtOpc = ExtI->getOpcode();
7926 unsigned ShiftOpc = ShiftI->getOpcode();
7927 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
7928 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
7929 Value *SLTZero =
7930 Builder.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
7931 Value *Cmp = Builder.CreateICmp(Pred, SLTZero, Y, I.getName());
7932 return replaceInstUsesWith(I, Cmp);
7933 }
7934 }
7935 }
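// A sketch of the first fold on hypothetical i32 IR:
//   %s = lshr i32 %x, 31
//   %z = zext i1 %y to i32
//   %c = icmp eq i32 %s, %z
// becomes
//   %sgn = icmp slt i32 %x, 0
//   %c   = icmp eq i1 %sgn, %y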
7936 }
7937
7938 if (Instruction *Res = foldICmpEquality(I))
7939 return Res;
7940
7941 if (Instruction *Res = foldICmpPow2Test(I, Builder))
7942 return Res;
7943
7944 if (Instruction *Res = foldICmpOfUAddOv(I))
7945 return Res;
7946
7947 // The 'cmpxchg' instruction returns an aggregate containing the old value and
7948 // an i1 which indicates whether or not we successfully did the swap.
7949 //
7950 // Replace comparisons between the old value and the expected value with the
7951 // indicator that 'cmpxchg' returns.
7952 //
7953 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
7954 // spuriously fail. In those cases, the old value may equal the expected
7955 // value but it is possible for the swap to not occur.
7956 if (I.getPredicate() == ICmpInst::ICMP_EQ)
7957 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
7958 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
7959 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
7960 !ACXI->isWeak())
7961 return ExtractValueInst::Create(ACXI, 1);
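// For example (hypothetical IR), with a strong (non-weak) cmpxchg:
//   %pair = cmpxchg ptr %p, i32 %old, i32 %new seq_cst seq_cst
//   %val  = extractvalue { i32, i1 } %pair, 0
//   %c    = icmp eq i32 %val, %old
// the compare becomes
//   %c    = extractvalue { i32, i1 } %pair, 1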
7962
7964 return Res;
7965
7966 if (I.getType()->isVectorTy())
7967 if (Instruction *Res = foldVectorCmp(I, Builder))
7968 return Res;
7969
7970 if (Instruction *Res = foldICmpInvariantGroup(I))
7971 return Res;
7972
7973 if (Instruction *Res = foldReductionIdiom(I, Builder, DL))
7974 return Res;
7975
7976 {
7977 Value *A;
7978 const APInt *C1, *C2;
7979 ICmpInst::Predicate Pred = I.getPredicate();
7980 if (ICmpInst::isEquality(Pred)) {
7981 // sext(a) & c1 == c2 --> a & c3 == trunc(c2)
7982 // sext(a) & c1 != c2 --> a & c3 != trunc(c2)
7983 if (match(Op0, m_And(m_SExt(m_Value(A)), m_APInt(C1))) &&
7984 match(Op1, m_APInt(C2))) {
7985 Type *InputTy = A->getType();
7986 unsigned InputBitWidth = InputTy->getScalarSizeInBits();
7987 // c2 must be non-negative at the bitwidth of a.
7988 if (C2->getActiveBits() < InputBitWidth) {
7989 APInt TruncC1 = C1->trunc(InputBitWidth);
7990 // Check if there are 1s in C1 high bits of size InputBitWidth.
7991 if (C1->uge(APInt::getOneBitSet(C1->getBitWidth(), InputBitWidth)))
7992 TruncC1.setBit(InputBitWidth - 1);
7993 Value *AndInst = Builder.CreateAnd(A, TruncC1);
7994 return new ICmpInst(
7995 Pred, AndInst,
7996 ConstantInt::get(InputTy, C2->trunc(InputBitWidth)));
7997 }
7998 }
7999 }
8000 }
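// For example (hypothetical i8 -> i32 sext), with c1 = 271 (0x10F) and
// c2 = 15: 'icmp eq (and (sext i8 %a to i32), 271), 15' becomes
// 'icmp eq (and i8 %a, -113), 15'. Here -113 is 0x8F: the truncated mask
// with the sign bit set, because c1 had 1s above bit 7 of the input type.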
8001
8002 return Changed ? &I : nullptr;
8003}
8004
8005/// Fold fcmp ([us]itofp x, cst) if possible.
8006Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
8007 Instruction *LHSI,
8008 Constant *RHSC) {
8009 const APFloat *RHS;
8010 if (!match(RHSC, m_APFloat(RHS)))
8011 return nullptr;
8012
8013 // Get the width of the mantissa. We don't want to hack on conversions that
8014 // might lose information from the integer, e.g. "i64 -> float"
8015 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
8016 if (MantissaWidth == -1)
8017 return nullptr; // Unknown.
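// For example, 'float' has a 24-bit significand, so an i32 (or wider)
// operand may not convert exactly: 2^24 + 1 rounds to 2^24. An i16 operand
// always converts exactly, so the reasoning below is trivially safe for it.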
8018
8019 Type *IntTy = LHSI->getOperand(0)->getType();
8020 unsigned IntWidth = IntTy->getScalarSizeInBits();
8021 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
8022
8023 if (I.isEquality()) {
8024 FCmpInst::Predicate P = I.getPredicate();
8025 bool IsExact = false;
8026 APSInt RHSCvt(IntWidth, LHSUnsigned);
8027 RHS->convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
8028
8029 // If the floating point constant isn't an integral value, we already know
8030 // the result of any equality comparison against it.
8031 if (!IsExact) {
8032 // TODO: Can never be -0.0 and other non-representable values
8033 APFloat RHSRoundInt(*RHS);
8034 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
8035 if (*RHS != RHSRoundInt) {
8036 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
8037 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8038
8039 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
8040 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8041 }
8042 }
8043
8044 // TODO: If the constant is exactly representable, is it always OK to do
8045 // equality compares as integer?
8046 }
8047
8048 // Check to see that the input is converted from an integer type that is
8049 // small enough to preserve all bits. TODO: check here for "known" sign bits.
8050 // This would allow us to handle (fptosi (x >>s 62) to float) e.g. if x is i64.
8051
8052 // The following test does NOT adjust IntWidth downwards for signed inputs,
8053 // because the most negative value still requires all the mantissa bits
8054 // to distinguish it from one less than that value.
8055 if ((int)IntWidth > MantissaWidth) {
8056 // Conversion would lose accuracy. Check if loss can impact comparison.
8057 int Exp = ilogb(*RHS);
8058 if (Exp == APFloat::IEK_Inf) {
8059 int MaxExponent = ilogb(APFloat::getLargest(RHS->getSemantics()));
8060 if (MaxExponent < (int)IntWidth - !LHSUnsigned)
8061 // Conversion could create infinity.
8062 return nullptr;
8063 } else {
8064 // Note that if RHS is zero or NaN, then Exp is negative
8065 // and first condition is trivially false.
8066 if (MantissaWidth <= Exp && Exp <= (int)IntWidth - !LHSUnsigned)
8067 // Conversion could affect comparison.
8068 return nullptr;
8069 }
8070 }
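// A worked example (hypothetical, signed i32 -> float): for RHS == 2.0e9,
// ilogb gives 30, which lies in [24, 31], so rounding in the conversion
// could change the comparison and we bail out; for RHS == 100.5 the
// exponent 6 is below the 24-bit significand width and the fold is safe.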
8071
8072 // Otherwise, we can potentially simplify the comparison. We know that it
8073 // will always come through as an integer value and we know the constant is
8074 // not a NAN (it would have been previously simplified).
8075 assert(!RHS->isNaN() && "NaN comparison not already folded!");
8076
8077 ICmpInst::Predicate Pred;
8078 switch (I.getPredicate()) {
8079 default:
8080 llvm_unreachable("Unexpected predicate!");
8081 case FCmpInst::FCMP_UEQ:
8082 case FCmpInst::FCMP_OEQ:
8083 Pred = ICmpInst::ICMP_EQ;
8084 break;
8085 case FCmpInst::FCMP_UGT:
8086 case FCmpInst::FCMP_OGT:
8087 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
8088 break;
8089 case FCmpInst::FCMP_UGE:
8090 case FCmpInst::FCMP_OGE:
8091 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
8092 break;
8093 case FCmpInst::FCMP_ULT:
8094 case FCmpInst::FCMP_OLT:
8095 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
8096 break;
8097 case FCmpInst::FCMP_ULE:
8098 case FCmpInst::FCMP_OLE:
8099 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
8100 break;
8101 case FCmpInst::FCMP_UNE:
8102 case FCmpInst::FCMP_ONE:
8103 Pred = ICmpInst::ICMP_NE;
8104 break;
8105 case FCmpInst::FCMP_ORD:
8106 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8107 case FCmpInst::FCMP_UNO:
8108 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8109 }
8110
8111 // Now we know that the APFloat is a normal number, zero or inf.
8112
8113 // See if the FP constant is too large for the integer. For example,
8114 // comparing an i8 to 300.0.
8115 if (!LHSUnsigned) {
8116 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
8117 // and large values.
8118 APFloat SMax(RHS->getSemantics());
8119 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
8120 APFloat::rmNearestTiesToEven);
8121 if (SMax < *RHS) { // smax < 13123.0
8122 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
8123 Pred == ICmpInst::ICMP_SLE)
8124 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8125 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8126 }
8127 } else {
8128 // If the RHS value is > UnsignedMax, fold the comparison. This handles
8129 // +INF and large values.
8130 APFloat UMax(RHS->getSemantics());
8131 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
8132 APFloat::rmNearestTiesToEven);
8133 if (UMax < *RHS) { // umax < 13123.0
8134 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
8135 Pred == ICmpInst::ICMP_ULE)
8136 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8137 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8138 }
8139 }
8140
8141 if (!LHSUnsigned) {
8142 // See if the RHS value is < SignedMin.
8143 APFloat SMin(RHS->getSemantics());
8144 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
8145 APFloat::rmNearestTiesToEven);
8146 if (SMin > *RHS) { // smin > 12312.0
8147 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
8148 Pred == ICmpInst::ICMP_SGE)
8149 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8150 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8151 }
8152 } else {
8153 // See if the RHS value is < UnsignedMin.
8154 APFloat UMin(RHS->getSemantics());
8155 UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
8156 APFloat::rmNearestTiesToEven);
8157 if (UMin > *RHS) { // umin > 12312.0
8158 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
8159 Pred == ICmpInst::ICMP_UGE)
8160 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8161 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8162 }
8163 }
8164
8165 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
8166 // [0, UMAX], but it may still be fractional. Check whether this is the case
8167 // using the IsExact flag.
8168 // Don't do this for zero, because -0.0 is not fractional.
8169 APSInt RHSInt(IntWidth, LHSUnsigned);
8170 bool IsExact;
8171 RHS->convertToInteger(RHSInt, APFloat::rmTowardZero, &IsExact);
8172 if (!RHS->isZero()) {
8173 if (!IsExact) {
8174 // If we had a comparison against a fractional value, we have to adjust
8175 // the compare predicate and sometimes the value. RHSC is rounded towards
8176 // zero at this point.
8177 switch (Pred) {
8178 default:
8179 llvm_unreachable("Unexpected integer comparison!");
8180 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
8181 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8182 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
8183 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8184 case ICmpInst::ICMP_ULE:
8185 // (float)int <= 4.4 --> int <= 4
8186 // (float)int <= -4.4 --> false
8187 if (RHS->isNegative())
8188 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8189 break;
8190 case ICmpInst::ICMP_SLE:
8191 // (float)int <= 4.4 --> int <= 4
8192 // (float)int <= -4.4 --> int < -4
8193 if (RHS->isNegative())
8194 Pred = ICmpInst::ICMP_SLT;
8195 break;
8196 case ICmpInst::ICMP_ULT:
8197 // (float)int < -4.4 --> false
8198 // (float)int < 4.4 --> int <= 4
8199 if (RHS->isNegative())
8200 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8201 Pred = ICmpInst::ICMP_ULE;
8202 break;
8203 case ICmpInst::ICMP_SLT:
8204 // (float)int < -4.4 --> int < -4
8205 // (float)int < 4.4 --> int <= 4
8206 if (!RHS->isNegative())
8207 Pred = ICmpInst::ICMP_SLE;
8208 break;
8209 case ICmpInst::ICMP_UGT:
8210 // (float)int > 4.4 --> int > 4
8211 // (float)int > -4.4 --> true
8212 if (RHS->isNegative())
8213 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8214 break;
8215 case ICmpInst::ICMP_SGT:
8216 // (float)int > 4.4 --> int > 4
8217 // (float)int > -4.4 --> int >= -4
8218 if (RHS->isNegative())
8219 Pred = ICmpInst::ICMP_SGE;
8220 break;
8221 case ICmpInst::ICMP_UGE:
8222 // (float)int >= -4.4 --> true
8223 // (float)int >= 4.4 --> int > 4
8224 if (RHS->isNegative())
8225 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8226 Pred = ICmpInst::ICMP_UGT;
8227 break;
8228 case ICmpInst::ICMP_SGE:
8229 // (float)int >= -4.4 --> int >= -4
8230 // (float)int >= 4.4 --> int > 4
8231 if (!RHS->isNegative())
8232 Pred = ICmpInst::ICMP_SGT;
8233 break;
8234 }
8235 }
8236 }
8237
8238 // Lower this FP comparison into an appropriate integer version of the
8239 // comparison.
8240 return new ICmpInst(Pred, LHSI->getOperand(0),
8241 ConstantInt::get(LHSI->getOperand(0)->getType(), RHSInt));
8242}
8243
8244/// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
8245static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
8246 Constant *RHSC) {
8247 // When C is not 0.0 and infinities are not allowed:
8248 // (C / X) < 0.0 is a sign-bit test of X
8249 // (C / X) < 0.0 --> X < 0.0 (if C is positive)
8250 // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
8251 //
8252 // Proof:
8253 // Multiply (C / X) < 0.0 by X * X / C.
8254 // - X is non-zero; if it were zero, C / X would be inf, violating 'ninf'.
8255 // - C defines the sign of X * X * C. Thus it also defines whether to swap
8256 // the predicate. C is also non zero by definition.
8257 //
8258 // Thus X * X / C is non zero and the transformation is valid. [qed]
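// For example (hypothetical, with 'ninf' on both the fdiv and the fcmp):
// 'fcmp olt (fdiv ninf float 2.0, %x), 0.0' folds to
// 'fcmp olt float %x, 0.0'; with a negative dividend such as -2.0 the
// predicate is swapped instead: 'fcmp ogt float %x, 0.0'.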
8259
8260 FCmpInst::Predicate Pred = I.getPredicate();
8261
8262 // Check that predicates are valid.
8263 if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
8264 (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
8265 return nullptr;
8266
8267 // Check that RHS operand is zero.
8268 if (!match(RHSC, m_AnyZeroFP()))
8269 return nullptr;
8270
8271 // Check fastmath flags ('ninf').
8272 if (!LHSI->hasNoInfs() || !I.hasNoInfs())
8273 return nullptr;
8274
8275 // Check the properties of the dividend. It must not be zero to avoid a
8276 // division by zero (see Proof).
8277 const APFloat *C;
8278 if (!match(LHSI->getOperand(0), m_APFloat(C)))
8279 return nullptr;
8280
8281 if (C->isZero())
8282 return nullptr;
8283
8284 // Get swapped predicate if necessary.
8285 if (C->isNegative())
8286 Pred = I.getSwappedPredicate();
8287
8288 return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
8289}
8290
8291// Transform 'fptrunc(x) cmp C' to 'x cmp ext(C)' if possible.
8292// Patterns include:
8293// fptrunc(x) < C --> x < ext(C)
8294// fptrunc(x) <= C --> x <= ext(C)
8295// fptrunc(x) > C --> x > ext(C)
8296// fptrunc(x) >= C --> x >= ext(C)
8297// where 'ext(C)' is the extension of 'C' to the type of 'x' with a small bias
8298// due to precision loss.
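// For example (hypothetical): 'fcmp olt float (fptrunc double %x to float),
// 1.0' can be rewritten to compare %x directly against a double placed
// midway between 1.0 and the next float below it, since every double below
// that midpoint rounds to a float below 1.0 when truncated.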
8299static Instruction *foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc,
8300 const Constant &C) {
8301 FCmpInst::Predicate Pred = I.getPredicate();
8302 bool RoundDown = false;
8303
8304 if (Pred == FCmpInst::FCMP_OGE || Pred == FCmpInst::FCMP_UGE ||
8305 Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_ULT)
8306 RoundDown = true;
8307 else if (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT ||
8308 Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)
8309 RoundDown = false;
8310 else
8311 return nullptr;
8312
8313 const APFloat *CValue;
8314 if (!match(&C, m_APFloat(CValue)))
8315 return nullptr;
8316
8317 if (CValue->isNaN() || CValue->isInfinity())
8318 return nullptr;
8319
8320 auto ConvertFltSema = [](const APFloat &Src, const fltSemantics &Sema) {
8321 bool LosesInfo;
8322 APFloat Dest = Src;
8323 Dest.convert(Sema, APFloat::rmNearestTiesToEven, &LosesInfo);
8324 return Dest;
8325 };
8326
8327 auto NextValue = [](const APFloat &Value, bool RoundDown) {
8328 APFloat NextValue = Value;
8329 NextValue.next(RoundDown);
8330 return NextValue;
8331 };
8332
8333 APFloat NextCValue = NextValue(*CValue, RoundDown);
8334
8335 Type *DestType = FPTrunc.getOperand(0)->getType();
8336 const fltSemantics &DestFltSema =
8337 DestType->getScalarType()->getFltSemantics();
8338
8339 APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
8340 APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
8341
8342 // When 'NextCValue' is infinity, use an imagined 'NextCValue' that equals
8343 // 'CValue + bias' to avoid the infinity after conversion. The bias is
8344 // estimated as 'CValue - PrevCValue', where 'PrevCValue' is the previous
8345 // value of 'CValue'.
8346 if (NextCValue.isInfinity()) {
8347 APFloat PrevCValue = NextValue(*CValue, !RoundDown);
8348 APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
8349
8350 ExtNextCValue = ExtCValue + Bias;
8351 }
8352
8353 APFloat ExtMidValue =
8354 scalbn(ExtCValue + ExtNextCValue, -1, APFloat::rmNearestTiesToEven);
8355
8356 const fltSemantics &SrcFltSema =
8357 C.getType()->getScalarType()->getFltSemantics();
8358
8359 // 'MidValue' might be rounded to 'NextCValue'. Correct it here.
8360 APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
8361 if (MidValue != *CValue)
8362 ExtMidValue.next(!RoundDown);
8363
8364 // Check whether 'ExtMidValue' is a valid result since the assumption on
8365 // the imagined 'NextCValue' might not hold for new float types.
8366 // ppc_fp128 can't pass here when converting from max float because of
8367 // APFloat implementation.
8368 if (NextCValue.isInfinity()) {
8369 // ExtMidValue --- narrowed ---> Finite
8370 if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())
8371 return nullptr;
8372
8373 // NextExtMidValue --- narrowed ---> Infinity
8374 APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
8375 if (ConvertFltSema(NextExtMidValue, SrcFltSema).isFinite())
8376 return nullptr;
8377 }
8378
8379 return new FCmpInst(Pred, FPTrunc.getOperand(0),
8380 ConstantFP::get(DestType, ExtMidValue), "", &I);
8381}
8382
8383/// Optimize fabs(X) compared with zero.
8384static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8385 Value *X;
8386 if (!match(I.getOperand(0), m_FAbs(m_Value(X))))
8387 return nullptr;
8388
8389 const APFloat *C;
8390 if (!match(I.getOperand(1), m_APFloat(C)))
8391 return nullptr;
8392
8393 if (!C->isPosZero()) {
8394 if (!C->isSmallestNormalized())
8395 return nullptr;
8396
8397 const Function *F = I.getFunction();
8398 DenormalMode Mode = F->getDenormalMode(C->getSemantics());
8399 if (Mode.Input == DenormalMode::PreserveSign ||
8400 Mode.Input == DenormalMode::PositiveZero) {
8401
8402 auto replaceFCmp = [](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8403 Constant *Zero = ConstantFP::getZero(X->getType());
8404 return new FCmpInst(P, X, Zero, "", I);
8405 };
8406
8407 switch (I.getPredicate()) {
8408 case FCmpInst::FCMP_OLT:
8409 // fcmp olt fabs(x), smallest_normalized_number -> fcmp oeq x, 0.0
8410 return replaceFCmp(&I, FCmpInst::FCMP_OEQ, X);
8411 case FCmpInst::FCMP_UGE:
8412 // fcmp uge fabs(x), smallest_normalized_number -> fcmp une x, 0.0
8413 return replaceFCmp(&I, FCmpInst::FCMP_UNE, X);
8414 case FCmpInst::FCMP_OGE:
8415 // fcmp oge fabs(x), smallest_normalized_number -> fcmp one x, 0.0
8416 return replaceFCmp(&I, FCmpInst::FCMP_ONE, X);
8417 case FCmpInst::FCMP_ULT:
8418 // fcmp ult fabs(x), smallest_normalized_number -> fcmp ueq x, 0.0
8419 return replaceFCmp(&I, FCmpInst::FCMP_UEQ, X);
8420 default:
8421 break;
8422 }
8423 }
8424
8425 return nullptr;
8426 }
8427
8428 auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8429 I->setPredicate(P);
8430 return IC.replaceOperand(*I, 0, X);
8431 };
8432
8433 switch (I.getPredicate()) {
8434 case FCmpInst::FCMP_UGE:
8435 case FCmpInst::FCMP_OLT:
8436 // fabs(X) >= 0.0 --> true
8437 // fabs(X) < 0.0 --> false
8438 llvm_unreachable("fcmp should have simplified");
8439
8440 case FCmpInst::FCMP_OGT:
8441 // fabs(X) > 0.0 --> X != 0.0
8442 return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
8443
8444 case FCmpInst::FCMP_UGT:
8445 // fabs(X) u> 0.0 --> X u!= 0.0
8446 return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
8447
8448 case FCmpInst::FCMP_OLE:
8449 // fabs(X) <= 0.0 --> X == 0.0
8450 return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
8451
8452 case FCmpInst::FCMP_ULE:
8453 // fabs(X) u<= 0.0 --> X u== 0.0
8454 return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
8455
8456 case FCmpInst::FCMP_OGE:
8457 // fabs(X) >= 0.0 --> !isnan(X)
8458 assert(!I.hasNoNaNs() && "fcmp should have simplified");
8459 return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
8460
8461 case FCmpInst::FCMP_ULT:
8462 // fabs(X) u< 0.0 --> isnan(X)
8463 assert(!I.hasNoNaNs() && "fcmp should have simplified");
8464 return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
8465
8466 case FCmpInst::FCMP_OEQ:
8467 case FCmpInst::FCMP_UEQ:
8468 case FCmpInst::FCMP_ONE:
8469 case FCmpInst::FCMP_UNE:
8470 case FCmpInst::FCMP_ORD:
8471 case FCmpInst::FCMP_UNO:
8472 // Look through the fabs() because it doesn't change anything but the sign.
8473 // fabs(X) == 0.0 --> X == 0.0,
8474 // fabs(X) != 0.0 --> X != 0.0
8475 // isnan(fabs(X)) --> isnan(X)
8476 // !isnan(fabs(X)) --> !isnan(X)
8477 return replacePredAndOp0(&I, I.getPredicate(), X);
8478
8479 default:
8480 return nullptr;
8481 }
8482}
8483
8484/// Optimize sqrt(X) compared with zero.
8485static Instruction *foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8486 Value *X;
8487 if (!match(I.getOperand(0), m_Sqrt(m_Value(X))))
8488 return nullptr;
8489
8490 if (!match(I.getOperand(1), m_PosZeroFP()))
8491 return nullptr;
8492
8493 auto ReplacePredAndOp0 = [&](FCmpInst::Predicate P) {
8494 I.setPredicate(P);
8495 return IC.replaceOperand(I, 0, X);
8496 };
8497
8498 // Clear ninf flag if sqrt doesn't have it.
8499 if (!cast<Instruction>(I.getOperand(0))->hasNoInfs())
8500 I.setHasNoInfs(false);
8501
8502 switch (I.getPredicate()) {
8503 case FCmpInst::FCMP_OLT:
8504 case FCmpInst::FCMP_UGE:
8505 // sqrt(X) < 0.0 --> false
8506 // sqrt(X) u>= 0.0 --> true
8507 llvm_unreachable("fcmp should have simplified");
8508 case FCmpInst::FCMP_ULT:
8509 case FCmpInst::FCMP_ULE:
8510 case FCmpInst::FCMP_OGT:
8511 case FCmpInst::FCMP_OGE:
8512 case FCmpInst::FCMP_OEQ:
8513 case FCmpInst::FCMP_UNE:
8514 // sqrt(X) u< 0.0 --> X u< 0.0
8515 // sqrt(X) u<= 0.0 --> X u<= 0.0
8516 // sqrt(X) > 0.0 --> X > 0.0
8517 // sqrt(X) >= 0.0 --> X >= 0.0
8518 // sqrt(X) == 0.0 --> X == 0.0
8519 // sqrt(X) u!= 0.0 --> X u!= 0.0
8520 return IC.replaceOperand(I, 0, X);
8521
8522 case FCmpInst::FCMP_OLE:
8523 // sqrt(X) <= 0.0 --> X == 0.0
8524 return ReplacePredAndOp0(FCmpInst::FCMP_OEQ);
8525 case FCmpInst::FCMP_UGT:
8526 // sqrt(X) u> 0.0 --> X u!= 0.0
8527 return ReplacePredAndOp0(FCmpInst::FCMP_UNE);
8528 case FCmpInst::FCMP_UEQ:
8529 // sqrt(X) u== 0.0 --> X u<= 0.0
8530 return ReplacePredAndOp0(FCmpInst::FCMP_ULE);
8531 case FCmpInst::FCMP_ONE:
8532 // sqrt(X) != 0.0 --> X > 0.0
8533 return ReplacePredAndOp0(FCmpInst::FCMP_OGT);
8534 case FCmpInst::FCMP_ORD:
8535 // !isnan(sqrt(X)) --> X >= 0.0
8536 return ReplacePredAndOp0(FCmpInst::FCMP_OGE);
8537 case FCmpInst::FCMP_UNO:
8538 // isnan(sqrt(X)) --> X u< 0.0
8539 return ReplacePredAndOp0(FCmpInst::FCMP_ULT);
8540 default:
8541 llvm_unreachable("Unexpected predicate!");
8542 }
8543}
8544
8545static Instruction *foldFCmpFNegCommonOp(FCmpInst &I) {
8546 CmpInst::Predicate Pred = I.getPredicate();
8547 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8548
8549 // Canonicalize fneg as Op1.
8550 if (match(Op0, m_FNeg(m_Value())) && !match(Op1, m_FNeg(m_Value()))) {
8551 std::swap(Op0, Op1);
8552 Pred = I.getSwappedPredicate();
8553 }
8554
8555 if (!match(Op1, m_FNeg(m_Specific(Op0))))
8556 return nullptr;
8557
8558 // Replace the negated operand with 0.0:
8559 // fcmp Pred Op0, -Op0 --> fcmp Pred Op0, 0.0
8560 Constant *Zero = ConstantFP::getZero(Op0->getType());
8561 return new FCmpInst(Pred, Op0, Zero, "", &I);
8562}
8563
8564static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI,
8565 Constant *RHSC, InstCombinerImpl &CI) {
8566 const CmpInst::Predicate Pred = I.getPredicate();
8567 Value *X = LHSI->getOperand(0);
8568 Value *Y = LHSI->getOperand(1);
8569 switch (Pred) {
8570 default:
8571 break;
8572 case FCmpInst::FCMP_UGT:
8573 case FCmpInst::FCMP_ULT:
8574 case FCmpInst::FCMP_UNE:
8575 case FCmpInst::FCMP_OEQ:
8576 case FCmpInst::FCMP_OGE:
8577 case FCmpInst::FCMP_OLE:
8578 // The optimization is not valid if X and Y are infinities of the same
8579 // sign, i.e. the inf - inf = nan case. If the fsub has the ninf or nnan
8580 // flag then we can assume we do not have that case. Otherwise we might be
8581 // able to prove that either X or Y is not infinity.
8582 if (!LHSI->hasNoNaNs() && !LHSI->hasNoInfs() &&
8583 !isKnownNeverInfinity(Y, CI.getSimplifyQuery().getWithInstruction(&I)) &&
8584 !isKnownNeverInfinity(X,
8585 CI.getSimplifyQuery().getWithInstruction(&I)))
8586 break;
8587
8588 [[fallthrough]];
8589 case FCmpInst::FCMP_OGT:
8590 case FCmpInst::FCMP_OLT:
8591 case FCmpInst::FCMP_ONE:
8592 case FCmpInst::FCMP_UEQ:
8593 case FCmpInst::FCMP_UGE:
8594 case FCmpInst::FCMP_ULE:
8595 // fcmp pred (x - y), 0 --> fcmp pred x, y
8596 if (match(RHSC, m_AnyZeroFP()) &&
8597 I.getFunction()->getDenormalMode(
8598 LHSI->getType()->getScalarType()->getFltSemantics()) ==
8599 DenormalMode::getIEEE()) {
8600 CI.replaceOperand(I, 0, X);
8601 CI.replaceOperand(I, 1, Y);
8602 I.setHasNoInfs(LHSI->hasNoInfs());
8603 if (LHSI->hasNoNaNs())
8604 I.setHasNoNaNs(true);
8605 return &I;
8606 }
8607 break;
8608 }
8609
8610 return nullptr;
8611}
8612
8613static Instruction *foldFCmpWithFloorAndCeil(FCmpInst &I,
8614 InstCombinerImpl &IC) {
8615 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
8616 Type *OpType = LHS->getType();
8617 CmpInst::Predicate Pred = I.getPredicate();
8618
8619 bool FloorX = match(LHS, m_Intrinsic<Intrinsic::floor>(m_Specific(RHS)));
8620 bool CeilX = match(LHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(RHS)));
8621
8622 if (!FloorX && !CeilX) {
8623 if ((FloorX = match(RHS, m_Intrinsic<Intrinsic::floor>(m_Specific(LHS)))) ||
8624 (CeilX = match(RHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(LHS))))) {
8625 std::swap(LHS, RHS);
8626 Pred = I.getSwappedPredicate();
8627 }
8628 }
8629
8630 switch (Pred) {
8631 case FCmpInst::FCMP_OLE:
8632 // fcmp ole floor(x), x => fcmp ord x, 0
8633 if (FloorX)
8635 "", &I);
8636 break;
8637 case FCmpInst::FCMP_OGT:
8638 // fcmp ogt floor(x), x => false
8639 if (FloorX)
8640 return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8641 break;
8642 case FCmpInst::FCMP_OGE:
8643 // fcmp oge ceil(x), x => fcmp ord x, 0
8644 if (CeilX)
8646 "", &I);
8647 break;
8648 case FCmpInst::FCMP_OLT:
8649 // fcmp olt ceil(x), x => false
8650 if (CeilX)
8651 return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8652 break;
8653 case FCmpInst::FCMP_ULE:
8654 // fcmp ule floor(x), x => true
8655 if (FloorX)
8656 return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8657 break;
8658 case FCmpInst::FCMP_UGT:
8659 // fcmp ugt floor(x), x => fcmp uno x, 0
8660 if (FloorX)
8662 "", &I);
8663 break;
8664 case FCmpInst::FCMP_UGE:
8665 // fcmp uge ceil(x), x => true
8666 if (CeilX)
8667 return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8668 break;
8669 case FCmpInst::FCMP_ULT:
8670 // fcmp ult ceil(x), x => fcmp uno x, 0
8671 if (CeilX)
8673 "", &I);
8674 break;
8675 default:
8676 break;
8677 }
8678
8679 return nullptr;
8680}
8681
8682Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
8683 bool Changed = false;
8684
8685 /// Orders the operands of the compare so that they are listed from most
8686 /// complex to least complex. This puts binary operators before unary
8687 /// operators, and constants last.
8688 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
8689 I.swapOperands();
8690 Changed = true;
8691 }
8692
8693 const CmpInst::Predicate Pred = I.getPredicate();
8694 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8695 if (Value *V = simplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
8696 SQ.getWithInstruction(&I)))
8697 return replaceInstUsesWith(I, V);
8698
8699 // Simplify 'fcmp pred X, X'
8700 Type *OpType = Op0->getType();
8701 assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
8702 if (Op0 == Op1) {
8703 switch (Pred) {
8704 default:
8705 break;
8706 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
8707 case FCmpInst::FCMP_ULT: // True if unordered or less than
8708 case FCmpInst::FCMP_UGT: // True if unordered or greater than
8709 case FCmpInst::FCMP_UNE: // True if unordered or not equal
8710 // Canonicalize these to be 'fcmp uno %X, 0.0'.
8711 I.setPredicate(FCmpInst::FCMP_UNO);
8712 I.setOperand(1, Constant::getNullValue(OpType));
8713 return &I;
8714
8715 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
8716 case FCmpInst::FCMP_OEQ: // True if ordered and equal
8717 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
8718 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
8719 // Canonicalize these to be 'fcmp ord %X, 0.0'.
8720 I.setPredicate(FCmpInst::FCMP_ORD);
8721 I.setOperand(1, Constant::getNullValue(OpType));
8722 return &I;
8723 }
8724 }
8725
8726 if (I.isCommutative()) {
8727 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
8728 replaceOperand(I, 0, Pair->first);
8729 replaceOperand(I, 1, Pair->second);
8730 return &I;
8731 }
8732 }
8733
8734 // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
8735 // then canonicalize the operand to 0.0.
8736 if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
8737 if (!match(Op0, m_PosZeroFP()) &&
8738 isKnownNeverNaN(Op0, getSimplifyQuery().getWithInstruction(&I)))
8739 return replaceOperand(I, 0, ConstantFP::getZero(OpType));
8740
8741 if (!match(Op1, m_PosZeroFP()) &&
8742 isKnownNeverNaN(Op1, getSimplifyQuery().getWithInstruction(&I)))
8743 return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8744 }
8745
8746 // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
8747 Value *X, *Y;
8748 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
8749 return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
8750
8751 if (Instruction *R = foldFCmpFNegCommonOp(I))
8752 return R;
8753
8754 // Test if the FCmpInst instruction is used exclusively by a select as
8755 // part of a minimum or maximum operation. If so, refrain from doing
8756 // any other folding. This helps out other analyses which understand
8757 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
8758 // and CodeGen. And in this case, at least one of the comparison
8759 // operands has at least one user besides the compare (the select),
8760 // which would often largely negate the benefit of folding anyway.
8761 if (I.hasOneUse())
8762 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
8763 Value *A, *B;
8764 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
8765 if (SPR.Flavor != SPF_UNKNOWN)
8766 return nullptr;
8767 }
8768
8769 // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
8770 // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
8771 if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
8772 return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8773
8774 // Canonicalize:
8775 // fcmp olt X, +inf -> fcmp one X, +inf
8776 // fcmp ole X, +inf -> fcmp ord X, 0
8777 // fcmp ogt X, +inf -> false
8778 // fcmp oge X, +inf -> fcmp oeq X, +inf
8779 // fcmp ult X, +inf -> fcmp une X, +inf
8780 // fcmp ule X, +inf -> true
8781 // fcmp ugt X, +inf -> fcmp uno X, 0
8782 // fcmp uge X, +inf -> fcmp ueq X, +inf
8783 // fcmp olt X, -inf -> false
8784 // fcmp ole X, -inf -> fcmp oeq X, -inf
8785 // fcmp ogt X, -inf -> fcmp one X, -inf
8786 // fcmp oge X, -inf -> fcmp ord X, 0
8787 // fcmp ult X, -inf -> fcmp uno X, 0
8788 // fcmp ule X, -inf -> fcmp ueq X, -inf
8789 // fcmp ugt X, -inf -> fcmp une X, -inf
8790 // fcmp uge X, -inf -> true
8791 const APFloat *C;
8792 if (match(Op1, m_APFloat(C)) && C->isInfinity()) {
8793 switch (C->isNegative() ? FCmpInst::getSwappedPredicate(Pred) : Pred) {
8794 default:
8795 break;
8796 case FCmpInst::FCMP_ORD:
8797 case FCmpInst::FCMP_UNO:
8798 case FCmpInst::FCMP_TRUE:
8799 case FCmpInst::FCMP_FALSE:
8800 case FCmpInst::FCMP_OGT:
8801 case FCmpInst::FCMP_ULE:
8802 llvm_unreachable("Should be simplified by InstSimplify");
8803 case FCmpInst::FCMP_OLT:
8804 return new FCmpInst(FCmpInst::FCMP_ONE, Op0, Op1, "", &I);
8805 case FCmpInst::FCMP_OLE:
8806 return new FCmpInst(FCmpInst::FCMP_ORD, Op0, ConstantFP::getZero(OpType),
8807 "", &I);
8808 case FCmpInst::FCMP_OGE:
8809 return new FCmpInst(FCmpInst::FCMP_OEQ, Op0, Op1, "", &I);
8810 case FCmpInst::FCMP_ULT:
8811 return new FCmpInst(FCmpInst::FCMP_UNE, Op0, Op1, "", &I);
8812 case FCmpInst::FCMP_UGT:
8813 return new FCmpInst(FCmpInst::FCMP_UNO, Op0, ConstantFP::getZero(OpType),
8814 "", &I);
8815 case FCmpInst::FCMP_UGE:
8816 return new FCmpInst(FCmpInst::FCMP_UEQ, Op0, Op1, "", &I);
8817 }
8818 }
8819
8820 // Ignore signbit of bitcasted int when comparing equality to FP 0.0:
8821 // fcmp oeq/une (bitcast X), 0.0 --> (and X, SignMaskC) ==/!= 0
8822 if (match(Op1, m_PosZeroFP()) &&
8823 match(Op0, m_OneUse(m_ElementWiseBitCast(m_Value(X))))) {
8824 ICmpInst::Predicate IntPred = ICmpInst::BAD_ICMP_PREDICATE;
8825 if (Pred == FCmpInst::FCMP_OEQ)
8826 IntPred = ICmpInst::ICMP_EQ;
8827 else if (Pred == FCmpInst::FCMP_UNE)
8828 IntPred = ICmpInst::ICMP_NE;
8829
8830 if (IntPred != ICmpInst::BAD_ICMP_PREDICATE) {
8831 Type *IntTy = X->getType();
8832 const APInt &SignMask = ~APInt::getSignMask(IntTy->getScalarSizeInBits());
8833 Value *MaskX = Builder.CreateAnd(X, ConstantInt::get(IntTy, SignMask));
8834 return new ICmpInst(IntPred, MaskX, ConstantInt::getNullValue(IntTy));
8835 }
8836 }
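// For example (hypothetical): 'fcmp oeq float (bitcast i32 %x to float),
// 0.0' becomes 'icmp eq i32 (and i32 %x, 2147483647), 0', which is true
// exactly for the bit patterns of +0.0 and -0.0.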
8837
8838 // Handle fcmp with instruction LHS and constant RHS.
8839 Instruction *LHSI;
8840 Constant *RHSC;
8841 if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
8842 switch (LHSI->getOpcode()) {
8843 case Instruction::Select:
8844 // fcmp eq (cond ? x : -x), 0 --> fcmp eq x, 0
8845 if (FCmpInst::isEquality(Pred) && match(RHSC, m_AnyZeroFP()) &&
8846 match(LHSI, m_c_Select(m_FNeg(m_Value(X)), m_Deferred(X))))
8847 return replaceOperand(I, 0, X);
8848 if (Instruction *NV = foldOpIntoSelect(I, cast<SelectInst>(LHSI)))
8849 return NV;
8850 break;
8851 case Instruction::FSub:
8852 if (LHSI->hasOneUse())
8853 if (Instruction *NV = foldFCmpFSubIntoFCmp(I, LHSI, RHSC, *this))
8854 return NV;
8855 break;
8856 case Instruction::PHI:
8857 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
8858 return NV;
8859 break;
8860 case Instruction::SIToFP:
8861 case Instruction::UIToFP:
8862 if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
8863 return NV;
8864 break;
8865 case Instruction::FDiv:
8866 if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
8867 return NV;
8868 break;
8869 case Instruction::Load:
8870 if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
8871 if (Instruction *Res =
8872 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, I))
8873 return Res;
8874 break;
8875 case Instruction::FPTrunc:
8876 if (Instruction *NV = foldFCmpFpTrunc(I, *LHSI, *RHSC))
8877 return NV;
8878 break;
8879 }
8880 }
8881
8882 if (Instruction *R = foldFabsWithFcmpZero(I, *this))
8883 return R;
8884
8885 if (Instruction *R = foldSqrtWithFcmpZero(I, *this))
8886 return R;
8887
8888 if (Instruction *R = foldFCmpWithFloorAndCeil(I, *this))
8889 return R;
8890
8891 if (match(Op0, m_FNeg(m_Value(X)))) {
8892 // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
8893 Constant *C;
8894 if (match(Op1, m_Constant(C)))
8895 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
8896 return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
8897 }
8898
8899 // fcmp (fadd X, 0.0), Y --> fcmp X, Y
8900 if (match(Op0, m_FAdd(m_Value(X), m_AnyZeroFP())))
8901 return new FCmpInst(Pred, X, Op1, "", &I);
8902
8903 // fcmp X, (fadd Y, 0.0) --> fcmp X, Y
8904 if (match(Op1, m_FAdd(m_Value(Y), m_AnyZeroFP())))
8905 return new FCmpInst(Pred, Op0, Y, "", &I);
8906
8907 if (match(Op0, m_FPExt(m_Value(X)))) {
8908 // fcmp (fpext X), (fpext Y) -> fcmp X, Y
8909 if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
8910 return new FCmpInst(Pred, X, Y, "", &I);
8911
8912 const APFloat *C;
8913 if (match(Op1, m_APFloat(C))) {
8914 const fltSemantics &FPSem =
8915 X->getType()->getScalarType()->getFltSemantics();
8916 bool Lossy;
8917 APFloat TruncC = *C;
8918 TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
8919
8920 if (Lossy) {
8921 // X can't possibly equal the higher-precision constant, so reduce any
8922 // equality comparison.
8923 // TODO: Other predicates can be handled via getFCmpCode().
8924 switch (Pred) {
8925 case FCmpInst::FCMP_OEQ:
8926 // X is ordered and equal to an impossible constant --> false
8927 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8928 case FCmpInst::FCMP_ONE:
8929 // X is ordered and not equal to an impossible constant --> ordered
8930 return new FCmpInst(FCmpInst::FCMP_ORD, X,
8931 ConstantFP::getZero(X->getType()));
8932 case FCmpInst::FCMP_UEQ:
8933 // X is unordered or equal to an impossible constant --> unordered
8934 return new FCmpInst(FCmpInst::FCMP_UNO, X,
8935 ConstantFP::getZero(X->getType()));
8936 case FCmpInst::FCMP_UNE:
8937 // X is unordered or not equal to an impossible constant --> true
8938 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8939 default:
8940 break;
8941 }
8942 }
8943
8944 // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
8945 // Avoid lossy conversions and denormals.
8946 // Zero is a special case that's OK to convert.
8947 APFloat Fabs = TruncC;
8948 Fabs.clearSign();
8949 if (!Lossy &&
8950 (Fabs.isZero() || !(Fabs < APFloat::getSmallestNormalized(FPSem)))) {
8951 Constant *NewC = ConstantFP::get(X->getType(), TruncC);
8952 return new FCmpInst(Pred, X, NewC, "", &I);
8953 }
8954 }
8955 }
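// For example (hypothetical half -> float): 'fcmp oeq float (fpext half %x
// to float), 1.5' becomes 'fcmp oeq half %x, 0xH3E00' because 1.5 is exact
// and normal in half, while an oeq compare against a float constant that is
// inexact in half folds to false outright.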
8956
8957 // Convert a sign-bit test of an FP value into a cast and integer compare.
8958 // TODO: Simplify if the copysign constant is 0.0 or NaN.
8959 // TODO: Handle non-zero compare constants.
8960 // TODO: Handle other predicates.
8961 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C),
8962 m_Value(X)))) &&
8963 match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) {
8964 Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());
8965 if (auto *VecTy = dyn_cast<VectorType>(OpType))
8966 IntType = VectorType::get(IntType, VecTy->getElementCount());
8967
8968 // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0
8969 if (Pred == FCmpInst::FCMP_OLT) {
8970 Value *IntX = Builder.CreateBitCast(X, IntType);
8971 return new ICmpInst(ICmpInst::ICMP_SLT, IntX,
8972 ConstantInt::getNullValue(IntType));
8973 }
8974 }
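// For example (hypothetical): 'fcmp olt float (call @llvm.copysign.f32(
// float 1.0, float %x)), 0.0' becomes 'icmp slt i32 (bitcast float %x to
// i32), 0', i.e. a direct test of the sign bit of %x.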
8975
8976 {
8977 Value *CanonLHS = nullptr;
8978 match(Op0, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonLHS)));
8979 // (canonicalize(x) == x) => (x == x)
8980 if (CanonLHS == Op1)
8981 return new FCmpInst(Pred, Op1, Op1, "", &I);
8982
8983 Value *CanonRHS = nullptr;
8984 match(Op1, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonRHS)));
8985 // (x == canonicalize(x)) => (x == x)
8986 if (CanonRHS == Op0)
8987 return new FCmpInst(Pred, Op0, Op0, "", &I);
8988
8989 // (canonicalize(x) == canonicalize(y)) => (x == y)
8990 if (CanonLHS && CanonRHS)
8991 return new FCmpInst(Pred, CanonLHS, CanonRHS, "", &I);
8992 }
8993
8994 if (I.getType()->isVectorTy())
8995 if (Instruction *Res = foldVectorCmp(I, Builder))
8996 return Res;
8997
8998 return Changed ? &I : nullptr;
8999}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define Check(C,...)
Hexagon Common GEP
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T1
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static constexpr roundingMode rmTowardZero
Definition APFloat.h:348
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
Definition APFloat.cpp:6053
void clearSign()
Definition APFloat.h:1280
bool isNaN() const
Definition APFloat.h:1429
bool isZero() const
Definition APFloat.h:1427
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
Definition APFloat.h:1140
APInt bitcastToAPInt() const
Definition APFloat.h:1335
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition APFloat.h:1120
opStatus next(bool nextDown)
Definition APFloat.h:1236
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1080
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
Definition APFloat.cpp:5982
opStatus roundToIntegral(roundingMode RM)
Definition APFloat.h:1230
bool isInfinity() const
Definition APFloat.h:1428
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition APInt.cpp:1573
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
Definition APInt.h:450
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1012
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition APInt.h:230
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1513
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
APInt abs() const
Get the absolute value.
Definition APInt.h:1796
unsigned ceilLogBase2() const
Definition APInt.h:1765
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition APInt.h:1202
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1948
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition APInt.h:1183
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
Definition APInt.h:467
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1112
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:210
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
Definition APInt.h:217
bool isNegative() const
Determine sign of this APInt.
Definition APInt.h:330
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
bool eq(const APInt &RHS) const
Equality comparison.
Definition APInt.h:1080
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition APInt.cpp:1644
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1935
void negate()
Negate this APInt in place.
Definition APInt.h:1469
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1640
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition APInt.h:1599
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:220
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:357
void flipAllBits()
Toggle every bit to its opposite value.
Definition APInt.h:1453
unsigned countl_one() const
Count the number of leading one bits.
Definition APInt.h:1616
unsigned logBase2() const
Definition APInt.h:1762
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:828
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1151
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition APInt.h:874
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:297
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1238
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:390
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:240
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:852
unsigned countr_one() const
Count the number of trailing one bits.
Definition APInt.h:1657
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1222
An arbitrary precision integer that knows its signedness.
Definition APSInt.h:24
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI instruction.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
static LLVM_ABI Predicate getFlippedStrictnessPredicate(Predicate pred)
This is a static version that you can use without an instruction available.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
bool isUnsigned() const
Definition InstrTypes.h:936
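The predicate transforms above also have static forms; a hedged sketch of what they yield for ICMP_SLE (expected results noted in comments):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void predicateDemo() {
  CmpInst::Predicate P = CmpInst::ICMP_SLE;
  CmpInst::Predicate Inv = CmpInst::getInversePredicate(P);    // ICMP_SGT
  CmpInst::Predicate Swap = CmpInst::getSwappedPredicate(P);   // ICMP_SGE
  CmpInst::Predicate Strict = CmpInst::getStrictPredicate(P);  // ICMP_SLT
  (void)Inv; (void)Swap; (void)Strict;
}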
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the limit value.
Definition Constants.h:269
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:219
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:136
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:159
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
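A small sketch of the ConstantInt accessors above (an i32 value is assumed; clamping behavior noted inline):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
using namespace llvm;

void constantIntDemo(LLVMContext &Ctx) {
  ConstantInt *CI = ConstantInt::getSigned(Type::getInt32Ty(Ctx), -4);
  unsigned BW = CI->getBitWidth();          // 32
  const APInt &V = CI->getValue();          // bit pattern 0xFFFFFFFC
  uint64_t Lim = CI->getLimitedValue(100);  // 0xFFFFFFFC u> 100, so 100
  (void)BW; (void)V; (void)Lim;
}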
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this range and a value in Other.
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current type.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std::nullopt.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this range and a value in Other.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
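A hedged sketch of the makeExactICmpRegion / getEquivalentICmp round-trip described above: the first builds the exact set of values satisfying "X pred C", and the second recovers a predicate and constant for such a set.

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void rangeDemo() {
  // All X with X u< 10, i.e. the half-open range [0, 10).
  ConstantRange CR =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 10));
  CmpInst::Predicate Pred;
  APInt RHS(32, 0);
  if (CR.getEquivalentICmp(Pred, RHS)) {
    // Expect Pred == ICMP_ULT and RHS == 10 again.
  }
}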
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers, all equal, and the common value is returned.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string, with methods for querying it.
Definition DataLayout.h:63
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate Pred)
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition Operator.h:430
LLVM_ABI Type * getSourceElementType() const
Definition Operator.cpp:71
Value * getPointerOperand()
Definition Operator.h:457
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
Definition Operator.h:504
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
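ICmpInst::compare evaluates a predicate on constant APInts, and the is{LT,GT,LE,GE} helpers classify predicates; an illustrative sketch:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void icmpHelpersDemo() {
  APInt A(32, 5), B(32, 7);
  bool R = ICmpInst::compare(A, B, ICmpInst::ICMP_SLT); // true: 5 s< 7
  bool LT = ICmpInst::isLT(ICmpInst::ICMP_ULT);         // true
  bool GE = ICmpInst::isGE(ICmpInst::ICMP_SGT);         // false: SGT is strict
  (void)R; (void)LT; (void)GE;
}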
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2442
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
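A minimal sketch of the IRBuilder calls above, building "(X & 1) == 0" at the end of a block (assumes X has type i32; the names are hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *emitIsEven(BasicBlock *BB, Value *X) {
  IRBuilder<> B(BB->getContext());
  B.SetInsertPoint(BB);
  Value *Bit = B.CreateAnd(X, B.getInt(APInt(32, 1)), "lowbit");
  return B.CreateICmp(CmpInst::ICMP_EQ, Bit, B.getInt(APInt(32, 0)),
                      "iseven");
}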
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2788
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth into a test of the value's sign bit.
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0, see if we can fold the instruction into the PHI (which is only possible if all operands to the PHI are constants).
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global variable with a constant initializer.
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min)
Match and fold patterns like: icmp eq/ne X, min(max(X, Lo), Hi) which represents a range check and ca...
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible, updating it in place.
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand, try to fold the binary operator into the select arguments.
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y and ((x * y) ?/ x) != y to @llvm.?mul.with.overflow(x, y) plus extraction of the overflow bit.
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate and we can update all of its users, invert the predicate and adjust the users.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns based on that limited range.
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than, equal to, or greater than RHS respectively.
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known to be zero or one in the input.
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have icmp eq/ne (urem/srem x, y), 0, and y is a power of two, we can replace this with a bit test.
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq ((num + mask) & ~mask), num to icmp eq (and num, mask), 0, where mask is a low-bit mask.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instruction.
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1), and ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1).
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
SimplifyQuery SQ
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms.
const DataLayout & DL
DomConditionCache DC
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
BuilderTy & Builder
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isShift() const
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static bool isMin(Intrinsic::ID ID)
Whether the intrinsic is a smin or umin.
static bool isSigned(Intrinsic::ID ID)
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
Definition SetVector.h:57
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
bool contains(const_arg_type key) const
Check if the SetVector contains the given key.
Definition SetVector.h:252
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
Definition Type.h:165
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old number of lanes.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
Definition Type.cpp:235
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
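An illustrative sketch of the scalar-oriented Type queries above, using a <4 x i8> vector:

#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

void typeDemo(LLVMContext &Ctx) {
  Type *I8 = IntegerType::get(Ctx, 8);
  Type *V4I8 = VectorType::get(I8, ElementCount::getFixed(4));
  unsigned Bits = V4I8->getScalarSizeInBits();  // 8: the element width
  Type *Wide = V4I8->getWithNewBitWidth(32);    // <4 x i32>
  (void)Bits; (void)Wide;
}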
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
void setOperand(unsigned i, Value *Val)
Definition User.h:237
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
Definition Value.cpp:158
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
const ParentTy * getParent() const
Definition ilist_node.h:34
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
Definition APInt.cpp:2763
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
Definition APInt.cpp:2781
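An illustrative use of the rounding division helpers above:

#include "llvm/ADT/APInt.h"
using namespace llvm;

void roundingDemo() {
  APInt A(32, 7), B(32, 2);
  APInt Up = APIntOps::RoundingUDiv(A, B, APInt::Rounding::UP);     // 4
  APInt Down = APIntOps::RoundingUDiv(A, B, APInt::Rounding::DOWN); // 3
  (void)Up; (void)Down;
}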
@ C
The default LLVM calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr and ashr).
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
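A hedged sketch of composing the matchers above (not code from this file): match "icmp eq (and X, C), 0", binding X and the mask constant.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

bool isMaskEqZero(Value *V, Value *&X, const APInt *&C) {
  CmpPredicate Pred;
  return match(V, m_ICmp(Pred, m_And(m_Value(X), m_APInt(C)), m_Zero())) &&
         Pred == ICmpInst::ICMP_EQ;
}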
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
Definition STLExtras.h:829
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector value has no infinity elements.
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
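A sketch of isSignBitCheck (assuming its declaration is visible via ValueTracking.h): "X s< 0" inspects only the sign bit.

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

void signBitDemo() {
  bool TrueIfSigned;
  bool IsSignTest =
      isSignBitCheck(ICmpInst::ICMP_SLT, APInt::getZero(32), TrueIfSigned);
  // Expect IsSignTest == true and TrueIfSigned == true.
  (void)IsSignTest;
}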
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0, otherwise returns nullptr.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
Definition APFloat.h:1516
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition bit.h:236
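Illustrative results for the bit-math helpers above:

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

void bitMathDemo() {
  unsigned L = Log2_32(32);           // 5
  int Z = countl_zero<uint32_t>(1u);  // 31: only bit 0 is set
  (void)L; (void)Z;
}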
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_UNKNOWN
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)
Decompose a pointer into a linear expression.
Definition Loads.cpp:893
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
Definition APFloat.h:1525
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out parameter results if we successfully match.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1751
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition MathExtras.h:394
@ Other
Any other memory.
Definition ModRef.h:68
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
Definition ModRef.h:74
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
constexpr unsigned BitWidth
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:1973
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2120
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
@ Continue
Definition DWP.h:22
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:866
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
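A hedged sketch of decomposeBitTestICmp (field names follow the DecomposedBitTest struct in CmpInstAnalysis.h; Int32Eight is assumed to be a constant i32 8):

#include "llvm/Analysis/CmpInstAnalysis.h"
using namespace llvm;

bool isLowBitsTest(Value *X, Value *Int32Eight) {
  auto Res = decomposeBitTestICmp(X, Int32Eight, CmpInst::ICMP_ULT);
  if (!Res)
    return false;
  // "X u< 8" should decompose to "(X & ~7) == 0".
  return Res->Pred == ICmpInst::ICMP_EQ && Res->C.isZero();
}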
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define NC
Definition regutils.h:42
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult value(Value *V)
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV)
static OffsetResult invalid()
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:80
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:242
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:274
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
Definition KnownBits.h:151
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition KnownBits.h:289
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
bool isConstant() const
Returns true if we know the value of all bits.
Definition KnownBits.h:54
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:145
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:129
bool isStrictlyPositive() const
Returns true if this value is known to be strictly positive (> 0).
Definition KnownBits.h:114
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
unsigned countMinPopulation() const
Returns the number of bits known to be one.
Definition KnownBits.h:286
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
Definition KnownBits.h:135
const APInt & getConstant() const
Returns the value when all bits have a known value.
Definition KnownBits.h:60
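A short sketch of querying KnownBits as in the entries above (8-bit example; Zero and One are the public known-zero/known-one masks):

#include "llvm/Support/KnownBits.h"
using namespace llvm;

void knownBitsDemo() {
  KnownBits Known(8);
  Known.Zero = APInt(8, 0b11110000);  // top four bits known zero
  Known.One = APInt(8, 0b00000001);   // lowest bit known one
  unsigned LeadZ = Known.countMinLeadingZeros();  // 4
  APInt Max = Known.getMaxValue();                // 0b00001111
  APInt Min = Known.getMinValue();                // 0b00000001
  (void)LeadZ; (void)Max; (void)Min;
}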
Linear expression BasePtr + Index * Scale + Offset.
Definition Loads.h:203
GEPNoWrapFlags Flags
Definition Loads.h:208
Matching combinators.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if the given SelectPatternFlavor is one of the recognized min/max patterns.
const DataLayout & DL
const Instruction * CxtI
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
AssumptionCache * AC
A MapVector that performs no allocations if smaller than a certain size.
Definition MapVector.h:276
Capture information for a specific Use.