//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <, >, <=, >= to ==, != if
//       possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X * 2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
                                       cl::desc("Enable code sinking"),
                                       cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

namespace llvm {
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
} // end namespace llvm

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  // Approved exception for TTI use: This queries a legality property of the
  // target, not a profitability heuristic. Ideally this should be part of
  // DataLayout instead.
  return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
}

Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)
    return llvm::emitGEPOffset(&Builder, DL, GEP);

  IRBuilderBase::InsertPointGuard Guard(Builder);
  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)
    Builder.SetInsertPoint(Inst);

  Value *Offset = EmitGEPOffset(GEP);
  // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
  if (Inst && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
    replaceInstUsesWith(
        *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                                 Offset, "", GEP->getNoWrapFlags()));
    eraseInstFromFunction(*Inst);
  }
  return Offset;
}

Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
                                        GEPNoWrapFlags NW, Type *IdxTy,
                                        bool RewriteGEPs) {
  auto Add = [&](Value *Sum, Value *Offset) -> Value * {
    if (Sum)
      return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
                               NW.isInBounds());
    else
      return Offset;
  };

  Value *Sum = nullptr;
  Value *OneUseSum = nullptr;
  Value *OneUseBase = nullptr;
  GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
  for (GEPOperator *GEP : reverse(GEPs)) {
    Value *Offset;
    {
      // Expand the offset at the point of the previous GEP to enable
      // rewriting. However, use the original insertion point for calculating
      // Sum.
      IRBuilderBase::InsertPointGuard Guard(Builder);
      auto *Inst = dyn_cast<Instruction>(GEP);
      if (RewriteGEPs && Inst)
        Builder.SetInsertPoint(Inst);

      Offset = EmitGEPOffset(GEP);
      if (Offset->getType() != IdxTy)
        Offset = Builder.CreateVectorSplat(
            cast<VectorType>(IdxTy)->getElementCount(), Offset);
      if (GEP->hasOneUse()) {
        // Offsets of one-use GEPs will be merged into the next multi-use GEP.
        OneUseSum = Add(OneUseSum, Offset);
        OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
        if (!OneUseBase)
          OneUseBase = GEP->getPointerOperand();
        continue;
      }

      if (OneUseSum)
        Offset = Add(OneUseSum, Offset);

      // Rewrite the GEP to reuse the computed offset. This also includes
      // offsets from preceding one-use GEPs of matched type.
      if (RewriteGEPs && Inst &&
          Offset->getType()->isVectorTy() == GEP->getType()->isVectorTy() &&
          !(GEP->getSourceElementType()->isIntegerTy(8) &&
            GEP->getOperand(1) == Offset)) {
        replaceInstUsesWith(
            *Inst,
            Builder.CreatePtrAdd(
                OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
                OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
        eraseInstFromFunction(*Inst);
      }
    }

    Sum = Add(Sum, Offset);
    OneUseSum = OneUseBase = nullptr;
    OneUseFlags = GEPNoWrapFlags::all();
  }
  if (OneUseSum)
    Sum = Add(Sum, OneUseSum);
  if (!Sum)
    return Constant::getNullValue(IdxTy);
  return Sum;
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If the source type is a legal or desirable integer type and the result
  // would be an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to
/// 'To'. We don't want to convert from a legal to an illegal type or from a
/// smaller to a larger illegal type. i1 is always treated as a legal type
/// because it is a fundamental type in IR, and there are many specialized
/// optimizations for i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true, if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add/Sub/Mul opcodes. For
// all other opcodes, the function conservatively returns false.
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  // We reason about Add/Sub/Mul Only.
  bool Overflow = false;
  switch (I.getOpcode()) {
  case Instruction::Add:
    (void)BVal->sadd_ov(*CVal, Overflow);
    break;
  case Instruction::Sub:
    (void)BVal->ssub_ov(*CVal, Overflow);
    break;
  case Instruction::Mul:
    (void)BVal->smul_ov(*CVal, Overflow);
    break;
  default:
    // Conservatively return false for other opcodes.
    return false;
  }
  return !Overflow;
}
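
// For example, when reassociating
//   %a = add nsw i32 %x, 5
//   %r = add nsw i32 %a, 7
// into
//   %r = add nsw i32 %x, 12
// the nsw flag can be kept, because 5 + 7 == 12 does not overflow i32.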

static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  auto *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp2->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}
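
// An illustrative example:
//   %a = and i8 %x, 60
//   %z = zext i8 %a to i32
//   %r = and i32 %z, 12
// becomes
//   %z = zext i8 %x to i32
//   %r = and i32 %z, 12    ; folded constant: 12 & zext(60) == 12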

// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}
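
// An illustrative example, assuming 64-bit pointers in address space 0:
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr
// Here %q simplifies to %p, because no bits are lost in either cast.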

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
/// Commutative operators:
///
/// 1. Order operands such that they are listed from right (least complex) to
///    left (most complex). This puts constants before unary operators before
///    binary operators.
///
/// Associative operators:
///
/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
/// Associative and commutative operators:
///
/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///    if C1 and C2 are constants.
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        bool IsNUW = hasNoUnsignedWrap(I) &&
                     hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
            BinaryOperator::CreateNUW(Opcode, A, B) :
            BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns identity value for given opcode, which can be used to
/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantFoldBinaryInstruction(
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
      assert(RHS && "Constant folding of immediate constants failed");
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}
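
// An illustrative example: for "add (shl %x, 5), %x", the shl operand is
// reported back as the equivalent "mul %x, 32", so the factorization code
// below can produce "mul %x, 33".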

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<BinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}
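
// An illustrative example:
//   %a = mul i32 %x, %y
//   %b = mul i32 %x, %z
//   %r = add i32 %a, %b
// is factorized (when %a or %b has no other uses) into
//   %s = add i32 %y, %z
//   %r = mul i32 %x, %s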

// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
// 1) (add/sub/disjoint_or C, (ctpop (not x))
//    -> (add/sub/disjoint_or C', (ctpop x))
// 2) (cmp pred C, (ctpop (not x))
//    -> (cmp pred C', (ctpop x))
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
  unsigned Opc = I->getOpcode();
  unsigned ConstIdx = 1;
  switch (Opc) {
  default:
    return nullptr;
  // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
  // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
  // is constant.
  case Instruction::Sub:
    ConstIdx = 0;
    break;
  case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types.
    // As well, since (ctpop x) is known to be in [0, BitWidth(x)], almost all
    // signed comparisons against it are simplified to unsigned.
    if (cast<ICmpInst>(I)->isSigned())
      return nullptr;
    break;
  case Instruction::Or:
    if (!match(I, m_DisjointOr(m_Value(), m_Value())))
      return nullptr;
    [[fallthrough]];
  case Instruction::Add:
    break;
  }

  Value *Op;
  // Find ctpop.
  if (!match(I->getOperand(1 - ConstIdx),
             m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
    return nullptr;

  Constant *C;
  // Check other operand is ImmConstant.
  if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
    return nullptr;

  Type *Ty = Op->getType();
  Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
  // Need extra check for icmp. Note if this check is true, it generally means
  // the icmp will simplify to true/false.
  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
    Constant *Cmp =
        ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
    if (!Cmp || !Cmp->isNullValue())
      return nullptr;
  }

  // Check we can invert `(not x)` for free.
  bool Consumes = false;
  if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
    return nullptr;
  Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");

  Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);

  Value *R = nullptr;

  // Do the transformation here to avoid potentially introducing an infinite
  // loop.
  switch (Opc) {
  case Instruction::Sub:
    R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
    break;
  case Instruction::Or:
  case Instruction::Add:
    R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
    break;
  case Instruction::ICmp:
    R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
                           CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
    break;
  default:
    llvm_unreachable("Unhandled Opcode");
  }
  assert(R != nullptr);
  return replaceInstUsesWith(*I, R);
}
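
// An illustrative example, using ctpop(~x) == 32 - ctpop(x) for i32:
//   %n = xor i32 %x, -1
//   %c = call i32 @llvm.ctpop.i32(i32 %n)
//   %r = add i32 %c, 10
// becomes
//   %c2 = call i32 @llvm.ctpop.i32(i32 %x)
//   %r = sub i32 42, %c2    ; 42 == 10 + 32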

// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
// IFF
//    1) the logic_shifts match
//    2) either both binops are binops and one is `and` or
//       BinOp1 is `and`
//       (logic_shift (inv_logic_shift C1, C), C) == C1 or
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
// IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
//
//    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
// IFF
//   1) Binop1 is bitwise logical operator `and`, `or` or `xor`
//   2) Binop2 is `not`
//
//   -> (arithmetic_shift Binop1((not X), Y), Amt)

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to
      // use add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If the BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need complete distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, need mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(
            I.getOperand(1 - ShOpnum),
            m_OneUse(m_c_BinOp(m_CombineAnd(m_OneUse(m_Shift(m_Value(X),
                                                             m_Specific(Shift))),
                                            m_Value(ShiftedX)),
                               m_Value(Mask)))))
      return nullptr;
    // Make sure we are matching instruction shifts and not ConstantExpr
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need same shift opcode
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure binop is real instruction and not ConstantExpr
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift irrelevant of constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}
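
// An illustrative example of the matching-binop case (rule 2 above):
//   %a = lshr i32 %x, 4
//   %b = xor i32 %a, %m
//   %c = lshr i32 %y, 4
//   %r = xor i32 %b, %c
// becomes
//   %t = xor i32 %x, %y
//   %s = lshr i32 %t, 4
//   %r = xor i32 %s, %m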

// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension, whose value is the select condition.
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;
  Constant *CastTrueVal, *CastFalseVal;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_SelectLike(m_Value(A), m_Constant(CastTrueVal),
                                      m_Constant(CastFalseVal))) &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  SelectInst *SI = !isa<SelectInst>(CastOp == LHS ? RHS : LHS)
                       ? nullptr
                       : cast<SelectInst>(CastOp == LHS ? RHS : LHS);

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    Value *CastVal = IsTrueArm ? CastFalseVal : CastTrueVal;

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, CastVal)
                       : Builder.CreateBinOp(Opc, CastVal, V);
  };

  // If the value used in the zext/sext is the select condition, or the
  // negation of the select condition, the binop can be simplified.
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal), "", nullptr, SI);
  }
  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal), "", nullptr, SI);
  }

  return nullptr;
}
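
// An illustrative example:
//   %e = sext i1 %c to i32
//   %s = select i1 %c, i32 %t, i32 %f
//   %r = add i32 %e, %s
// becomes
//   %ta = add i32 -1, %t
//   %fa = add i32 0, %f
//   %r = select i1 %c, i32 %ta, i32 %fa
// which then folds further (%ta to "%t - 1", %fa to %f).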

Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)". Try to factorize common
  // term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)". Try to factorize common
  // term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C". See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)". See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}
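
// An illustrative example of the expansion path:
//   %o = or i32 %x, 5
//   %r = and i32 %o, 8
// expands to "(%x & 8) | (5 & 8)". Since "5 & 8" is 0, the identity value
// of 'or', the whole expression simplifies to
//   %r = and i32 %x, 8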

static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:
    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
  case Instruction::Select: {
    Value *Cond = LHSInst->getOperand(0);
    Value *TrueVal = LHSInst->getOperand(1);
    Value *FalseVal = LHSInst->getOperand(2);
    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
        FalseVal == RHSInst->getOperand(1))
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    // Match min(a, b) and max(a, b)
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  SelectInst *SI = LHSIsSelect && RHSIsSelect
                       ? nullptr
                       : cast<SelectInst>(LHSIsSelect ? LHS : RHS);

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
    FMF = FPOp->getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been
    // simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;
    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
  NewSI->takeName(&I);
  return NewSI;
}
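
// An illustrative example of the add/negate special case:
//   %n = sub i32 0, %v
//   %s = select i1 %c, i32 %t, i32 %n
//   %r = add i32 %s, %z
// becomes, provided "%t + %z" simplifies to some value %tz:
//   %d = sub i32 %z, %v
//   %r = select i1 %c, i32 %tz, i32 %d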

/// Freely adapt every user of V as-if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::CondBr: {
      auto *BI = cast<BranchInst>(U);
      BI->swapSuccessors(); // swaps prof metadata too
      if (BPI)
        BPI->swapSuccEdgesProbabilities(BI->getParent());
      break;
    }
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }

  // Update pre-existing debug value uses.
  SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
  llvm::findDbgValues(I, DbgVariableRecords);

  for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
    SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
    for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
         Idx != End; ++Idx)
      if (DbgVal->getVariableLocationOp(Idx) == I)
        DbgVal->setExpression(
            DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
  }
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is
/// a constant zero (which is the 'negate' form).
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

// Try to fold:
//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
//        -> ({s|u}itofp (int_binop x, y))
//    2) (fp_binop ({s|u}itofp x), FpC)
//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
//
// Assuming the sign of the cast for x/y is `OpsFromSigned`.
Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
    BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
    Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {

  Type *FPTy = BO.getType();
  Type *IntTy = IntOps[0]->getType();

  unsigned IntSz = IntTy->getScalarSizeInBits();
  // This is the maximum number of in-use bits by the integer where the
  // int -> fp casts are exact.
  unsigned MaxRepresentableBits =
      APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());

  // Preserve known number of leading bits. This can allow us to trivially pass
  // the nsw/nuw checks later on.
  unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};

  // NB: This only comes up if OpsFromSigned is true, so there is no need to
  // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
  auto IsNonZero = [&](unsigned OpNo) -> bool {
    if (OpsKnown[OpNo].hasKnownBits() &&
        OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
      return true;
    return isKnownNonZero(IntOps[OpNo], SQ);
  };

  auto IsNonNeg = [&](unsigned OpNo) -> bool {
    // NB: This matches the impl in ValueTracking, we just try to use cached
    // knownbits here. If we ever start supporting WithCache for
    // `isKnownNonNegative`, change this to an explicit call.
    return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
  };

  // Check if we know for certain that ({s|u}itofp op) is exact.
  auto IsValidPromotion = [&](unsigned OpNo) -> bool {
    // Can we treat this operand as the desired sign?
    if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
        !IsNonNeg(OpNo))
      return false;

    // If fp precision >= bitwidth(op) then it's exact.
    // NB: This is slightly conservative for `sitofp`. For signed conversion,
    // we can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will
    // be handled specially. We can't, however, increase the bound arbitrarily
    // for `sitofp` as for larger sizes, it won't sign extend.
    if (MaxRepresentableBits < IntSz) {
      // Otherwise if it's a signed cast, check that fp precision >=
      // bitwidth(op) - numSignBits(op).
      // TODO: If we add support for `WithCache` in `ComputeNumSignBits`,
      // change `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
      if (OpsFromSigned)
        NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
      // Finally for unsigned check that fp precision >= bitwidth(op) -
      // numLeadingZeros(op).
      else {
        NumUsedLeadingBits[OpNo] =
            IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
      }
    }
    // NB: We could also check if op is known to be a power of 2 or zero (which
    // will always be representable). It's unlikely, however, that if we are
    // unable to bound op in any way, we will be able to pass the overflow
    // checks later on.

    if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
      return false;
    // Signed + Mul also requires that op is non-zero to avoid -0 cases.
    return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
           IsNonZero(OpNo);
  };

  // If we have a constant rhs, see if we can losslessly convert it to an int.
  if (Op1FpC != nullptr) {
    // Signed + Mul req non-zero
    if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
        !match(Op1FpC, m_NonZeroFP()))
      return nullptr;

    Constant *Op1IntC = ConstantFoldCastOperand(
        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
        IntTy, DL);
    if (Op1IntC == nullptr)
      return nullptr;
    if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
                                              : Instruction::UIToFP,
                                Op1IntC, FPTy, DL) != Op1FpC)
      return nullptr;

    // First try to keep sign of cast the same.
    IntOps[1] = Op1IntC;
  }

  // Ensure lhs/rhs integer types match.
  if (IntTy != IntOps[1]->getType())
    return nullptr;

  if (Op1FpC == nullptr) {
    if (!IsValidPromotion(1))
      return nullptr;
  }
  if (!IsValidPromotion(0))
    return nullptr;

  // Finally we check if the integer version of the binop will not overflow.
  Instruction::BinaryOps IntOpc;
  // Because of the precision check, we can often rule out overflows.
  bool NeedsOverflowCheck = true;
  // Try to conservatively rule out overflow based on the already done
  // precision checks.
  unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
  unsigned OverflowMaxCurBits =
      std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
  bool OutputSigned = OpsFromSigned;
  switch (BO.getOpcode()) {
  case Instruction::FAdd:
    IntOpc = Instruction::Add;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FSub:
    IntOpc = Instruction::Sub;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FMul:
    IntOpc = Instruction::Mul;
    OverflowMaxOutputBits += OverflowMaxCurBits * 2;
    break;
  default:
    llvm_unreachable("Unsupported binop");
  }
  // The precision check may have already ruled out overflow.
  if (OverflowMaxOutputBits < IntSz) {
    NeedsOverflowCheck = false;
    // We can bound unsigned overflow from sub to an in-range signed value
    // (this is what allows us to avoid the overflow check for sub).
    if (IntOpc == Instruction::Sub)
      OutputSigned = true;
  }

  // Precision check did not rule out overflow, so need to check.
  // TODO: If we add support for `WithCache` in `willNotOverflow`, change
  // `IntOps[...]` arguments to `KnownOps[...]`.
  if (NeedsOverflowCheck &&
      !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
    return nullptr;

  Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
  if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
    IntBO->setHasNoSignedWrap(OutputSigned);
    IntBO->setHasNoUnsignedWrap(!OutputSigned);
  }
  if (OutputSigned)
    return new SIToFPInst(IntBinOp, FPTy);
  return new UIToFPInst(IntBinOp, FPTy);
}

1683// Try to fold:
1684// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1685// -> ({s|u}itofp (int_binop x, y))
1686// 2) (fp_binop ({s|u}itofp x), FpC)
1687// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1688Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1689 // Don't perform the fold on vectors, as the integer operation may be much
1690 // more expensive than the float operation in that case.
1691 if (BO.getType()->isVectorTy())
1692 return nullptr;
1693
1694 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1695 Constant *Op1FpC = nullptr;
1696 // Check for:
1697 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1698 // 2) (binop ({s|u}itofp x), FpC)
1699 if (!match(BO.getOperand(0), m_IToFP(m_Value(IntOps[0]))))
1700 return nullptr;
1701
1702 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1703 !match(BO.getOperand(1), m_IToFP(m_Value(IntOps[1]))))
1704 return nullptr;
1705
1706 // Cache KnownBits computations to potentially save some analysis.
1707 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1708
1709 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1710 // different constraints depending on the sign of the cast.
1711 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1712 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1713 IntOps, Op1FpC, OpsKnown))
1714 return R;
1715 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1716 Op1FpC, OpsKnown);
1717}
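// Editor's sketch of the overall fold (hypothetical IR; assumes the precision
// and overflow checks above succeed for these i8 operands):
//   %xf = sitofp i8 %x to float
//   %yf = sitofp i8 %y to float
//   %r  = fadd float %xf, %yf
// becomes
//   %s = add nsw i8 %x, %y
//   %r = sitofp i8 %s to float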
1718
1719/// A binop with a constant operand and a sign-extended boolean operand may be
1720/// converted into a select of constants by applying the binary operation to
1721/// the constant with the two possible values of the extended boolean (0 or -1).
1722Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1723 // TODO: Handle non-commutative binop (constant is operand 0).
1724 // TODO: Handle zext.
1725 // TODO: Peek through 'not' of cast.
1726 Value *BO0 = BO.getOperand(0);
1727 Value *BO1 = BO.getOperand(1);
1728 Value *X;
1729 Constant *C;
1730 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1731 !X->getType()->isIntOrIntVectorTy(1))
1732 return nullptr;
1733
1734 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1735 Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
1736 Constant *Zero = ConstantInt::getNullValue(BO.getType());
1737 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1738 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1739 return createSelectInstWithUnknownProfile(X, TVal, FVal);
1740}
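// Worked instance (editor's illustration, not upstream text):
//   %e = sext i1 %b to i32
//   %r = add i32 %e, 5
// becomes
//   %r = select i1 %b, i32 4, i32 5   ; add(-1, 5) = 4 and add(0, 5) = 5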
1741
1742static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1743 bool IsTrueArm) {
1744 SmallVector<Value *> Ops;
1745 for (Value *Op : I.operands()) {
1746 Value *V = nullptr;
1747 if (Op == SI) {
1748 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1749 } else if (match(SI->getCondition(),
1750 m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1751 : ICmpInst::ICMP_NE,
1752 m_Specific(Op), m_Value(V))) &&
1753 isGuaranteedNotToBeUndefOrPoison(V)) {
1754 // Pass
1755 } else if (match(Op, m_ZExt(m_Specific(SI->getCondition())))) {
1756 V = IsTrueArm ? ConstantInt::get(Op->getType(), 1)
1757 : ConstantInt::getNullValue(Op->getType());
1758 } else {
1759 V = Op;
1760 }
1761 Ops.push_back(V);
1762 }
1763
1764 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1765}
1766
1767static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1768 Value *NewOp, InstCombiner &IC) {
1769 Instruction *Clone = I.clone();
1770 Clone->replaceUsesOfWith(SI, NewOp);
1771 Clone->dropUBImplyingAttrsAndMetadata();
1772 IC.InsertNewInstBefore(Clone, I.getIterator());
1773 return Clone;
1774}
1775
1776Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1777 bool FoldWithMultiUse,
1778 bool SimplifyBothArms) {
1779 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1780 if (!SI->hasOneUser() && !FoldWithMultiUse)
1781 return nullptr;
1782
1783 Value *TV = SI->getTrueValue();
1784 Value *FV = SI->getFalseValue();
1785
1786 // Bool selects with constant operands can be folded to logical ops.
1787 if (SI->getType()->isIntOrIntVectorTy(1))
1788 return nullptr;
1789
1790 // Avoid breaking min/max reduction pattern,
1791 // which is necessary for vectorization later.
1792 if (isa<MinMaxIntrinsic>(&Op))
1793 for (Value *IntrinOp : Op.operands())
1794 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1795 for (Value *PhiOp : PN->operands())
1796 if (PhiOp == &Op)
1797 return nullptr;
1798
1799 // Test if a FCmpInst instruction is used exclusively by a select as
1800 // part of a minimum or maximum operation. If so, refrain from doing
1801 // any other folding. This helps out other analyses which understand
1802 // non-obfuscated minimum and maximum idioms. And in this case, at
1803 // least one of the comparison operands has at least one user besides
1804 // the compare (the select), which would often largely negate the
1805 // benefit of folding anyway.
1806 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1807 if (CI->hasOneUse()) {
1808 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1809 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1810 !CI->isCommutative())
1811 return nullptr;
1812 }
1813 }
1814
1815 // Make sure that one of the select arms folds successfully.
1816 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1817 Value *NewFV =
1818 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1819 if (!NewTV && !NewFV)
1820 return nullptr;
1821
1822 if (SimplifyBothArms && !(NewTV && NewFV))
1823 return nullptr;
1824
1825 // Create an instruction for the arm that did not fold.
1826 if (!NewTV)
1827 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1828 if (!NewFV)
1829 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1830 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1831}
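// Net effect on a simple case (editor's illustration; here both select arms
// simplify to constants):
//   %s = select i1 %c, i32 0, i32 8
//   %r = lshr i32 %s, 3
// becomes
//   %r = select i1 %c, i32 0, i32 1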
1832
1833static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1834 Value *InValue, BasicBlock *InBB,
1835 const DataLayout &DL,
1836 const SimplifyQuery SQ) {
1837 // NB: It is a precondition of this transform that the operands be
1838 // phi translatable!
1839 SmallVector<Value *> Ops;
1840 for (Value *Op : I.operands()) {
1841 if (Op == PN)
1842 Ops.push_back(InValue);
1843 else
1844 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1845 }
1846
1847 // Don't consider the simplification successful if we get back a constant
1848 // expression. That's just an instruction in hiding.
1849 // Also reject the case where we simplify back to the phi node. We wouldn't
1850 // be able to remove it in that case.
1851 Value *NewVal = simplifyInstructionWithOperands(
1852 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1853 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1854 return NewVal;
1855
1856 // Check if incoming PHI value can be replaced with constant
1857 // based on implied condition.
1858 CondBrInst *TerminatorBI = dyn_cast<CondBrInst>(InBB->getTerminator());
1859 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1860 if (TerminatorBI &&
1861 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1862 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1863 std::optional<bool> ImpliedCond = isImpliedCondition(
1864 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1865 DL, LHSIsTrue);
1866 if (ImpliedCond)
1867 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1868 }
1869
1870 return nullptr;
1871}
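// Example of the implied-condition path (editor's illustration): if %inbb
// branches to the phi block exactly when `icmp slt i32 %x, 10` is true, and
// the phi's user is `icmp slt i32 %phi, 20` with incoming value %x on that
// edge, the branch condition implies the compare, so this incoming value
// folds to true.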
1872
1873/// In some cases it is beneficial to fold a select into a binary operator.
1874/// For example:
1875/// %1 = or %in, 4
1876/// %2 = select %cond, %1, %in
1877/// %3 = or %2, 1
1878/// =>
1879/// %1 = select i1 %cond, 5, 1
1880/// %2 = or %1, %in
1882 assert(Op.isAssociative() && "The operation must be associative!");
1883
1884 SelectInst *SI = dyn_cast<SelectInst>(Op.getOperand(0));
1885
1886 Constant *Const;
1887 if (!SI || !match(Op.getOperand(1), m_ImmConstant(Const)) ||
1888 !Op.hasOneUse() || !SI->hasOneUse())
1889 return nullptr;
1890
1891 Value *TV = SI->getTrueValue();
1892 Value *FV = SI->getFalseValue();
1893 Value *Input, *NewTV, *NewFV;
1894 Constant *Const2;
1895
1896 if (TV->hasOneUse() && match(TV, m_BinOp(Op.getOpcode(), m_Specific(FV),
1897 m_ImmConstant(Const2)))) {
1898 NewTV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1899 NewFV = Const;
1900 Input = FV;
1901 } else if (FV->hasOneUse() &&
1902 match(FV, m_BinOp(Op.getOpcode(), m_Specific(TV),
1903 m_ImmConstant(Const2)))) {
1904 NewTV = Const;
1905 NewFV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1906 Input = TV;
1907 } else
1908 return nullptr;
1909
1910 if (!NewTV || !NewFV)
1911 return nullptr;
1912
1913 Value *NewSI =
1914 Builder.CreateSelect(SI->getCondition(), NewTV, NewFV, "",
1915 ProfcheckDisableMetadataFixes ? nullptr : SI);
1916 return BinaryOperator::Create(Op.getOpcode(), NewSI, Input);
1917}
1918
1919Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1920 bool AllowMultipleUses) {
1921 unsigned NumPHIValues = PN->getNumIncomingValues();
1922 if (NumPHIValues == 0)
1923 return nullptr;
1924
1925 // We normally only transform phis with a single use. However, if a PHI has
1926 // multiple uses and they are all the same operation, we can fold *all* of the
1927 // uses into the PHI.
1928 bool OneUse = PN->hasOneUse();
1929 bool IdenticalUsers = false;
1930 if (!AllowMultipleUses && !OneUse) {
1931 // Walk the use list for the instruction, comparing them to I.
1932 for (User *U : PN->users()) {
1933 Instruction *UI = cast<Instruction>(U);
1934 if (UI != &I && !I.isIdenticalTo(UI))
1935 return nullptr;
1936 }
1937 // Otherwise, we can replace *all* users with the new PHI we form.
1938 IdenticalUsers = true;
1939 }
1940
1941 // Check that all operands are phi-translatable.
1942 for (Value *Op : I.operands()) {
1943 if (Op == PN)
1944 continue;
1945
1946 // Non-instructions never require phi-translation.
1947 auto *I = dyn_cast<Instruction>(Op);
1948 if (!I)
1949 continue;
1950
1951 // Phi-translate can handle phi nodes in the same block.
1952 if (isa<PHINode>(I))
1953 if (I->getParent() == PN->getParent())
1954 continue;
1955
1956 // Operand dominates the block, no phi-translation necessary.
1957 if (DT.dominates(I, PN->getParent()))
1958 continue;
1959
1960 // Not phi-translatable, bail out.
1961 return nullptr;
1962 }
1963
1964 // Check to see whether the instruction can be folded into each phi operand.
1965 // If there is one operand that does not fold, remember the BB it is in.
1966 SmallVector<Value *> NewPhiValues;
1967 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1968 bool SeenNonSimplifiedInVal = false;
1969 for (unsigned i = 0; i != NumPHIValues; ++i) {
1970 Value *InVal = PN->getIncomingValue(i);
1971 BasicBlock *InBB = PN->getIncomingBlock(i);
1972
1973 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1974 NewPhiValues.push_back(NewVal);
1975 continue;
1976 }
1977
1978 // Handle some cases that can't be fully simplified, but where we know that
1979 // the two instructions will fold into one.
1980 auto WillFold = [&]() {
1981 if (!InVal->hasUseList() || !InVal->hasOneUser())
1982 return false;
1983
1984 // icmp of ucmp/scmp with constant will fold to icmp.
1985 const APInt *Ignored;
1986 if (isa<CmpIntrinsic>(InVal) &&
1987 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1988 return true;
1989
1990 // icmp eq zext(bool), 0 will fold to !bool.
1991 if (isa<ZExtInst>(InVal) &&
1992 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1993 match(&I,
1994 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_Zero())))
1995 return true;
1996
1997 return false;
1998 };
1999
2000 if (WillFold()) {
2001 OpsToMoveUseToIncomingBB.push_back(i);
2002 NewPhiValues.push_back(nullptr);
2003 continue;
2004 }
2005
2006 if (!OneUse && !IdenticalUsers)
2007 return nullptr;
2008
2009 if (SeenNonSimplifiedInVal)
2010 return nullptr; // More than one non-simplified value.
2011 SeenNonSimplifiedInVal = true;
2012
2013 // If there is exactly one non-simplified value, we can insert a copy of the
2014 // operation in that block. However, if this is a critical edge, we would
2015 // be inserting the computation on some other paths (e.g. inside a loop).
2016 // Only do this if the pred block is unconditionally branching into the phi
2017 // block. Also, make sure that the pred block is not dead code.
2018 auto *BI = dyn_cast<UncondBrInst>(InBB->getTerminator());
2019 if (!BI || !DT.isReachableFromEntry(InBB))
2020 return nullptr;
2021
2022 NewPhiValues.push_back(nullptr);
2023 OpsToMoveUseToIncomingBB.push_back(i);
2024
2025 // Do not push the operation across a loop backedge. This could result in
2026 // an infinite combine loop, and is generally non-profitable (especially
2027 // if the operation was originally outside the loop).
2028 if (isBackEdge(InBB, PN->getParent()))
2029 return nullptr;
2030 }
2031
2032 // Clone the instruction that uses the phi node and move it into the incoming
2033 // BB because we know that the next iteration of InstCombine will simplify it.
2034 SmallDenseMap<BasicBlock *, Instruction *> Clones;
2035 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
2036 Value *Op = PN->getIncomingValue(OpIndex);
2037 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
2038
2039 Instruction *Clone = Clones.lookup(OpBB);
2040 if (!Clone) {
2041 Clone = I.clone();
2042 for (Use &U : Clone->operands()) {
2043 if (U == PN)
2044 U = Op;
2045 else
2046 U = U->DoPHITranslation(PN->getParent(), OpBB);
2047 }
2048 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2049 Clones.insert({OpBB, Clone});
2050 // We may have speculated the instruction.
2051 Clone->dropUBImplyingAttrsAndMetadata();
2052 }
2053
2054 NewPhiValues[OpIndex] = Clone;
2055 }
2056
2057 // Okay, we can do the transformation: create the new PHI node.
2058 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2059 InsertNewInstBefore(NewPN, PN->getIterator());
2060 NewPN->takeName(PN);
2061 NewPN->setDebugLoc(PN->getDebugLoc());
2062
2063 for (unsigned i = 0; i != NumPHIValues; ++i)
2064 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2065
2066 if (IdenticalUsers) {
2067 // Collect and deduplicate users up-front to avoid iterator invalidation.
2068 SmallSetVector<Instruction *, 4> ToReplace;
2069 for (User *U : PN->users()) {
2070 Instruction *User = cast<Instruction>(U);
2071 if (User == &I)
2072 continue;
2073 ToReplace.insert(User);
2074 }
2075 for (Instruction *I : ToReplace) {
2076 replaceInstUsesWith(*I, NewPN);
2077 eraseInstFromFunction(*I);
2078 }
2079 OneUse = true;
2080 }
2081
2082 if (OneUse) {
2083 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2084 }
2085 return replaceInstUsesWith(I, NewPN);
2086}
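// Sketch of the transform (editor's illustration; assumes %bb1 ends in an
// unconditional branch to the phi block so the add may be cloned there):
//   %p = phi i32 [ 0, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 5
// becomes
//   %r = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]   ; %x.add = add i32 %x, 5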
2087
2088Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2089 if (!BO.isAssociative())
2090 return nullptr;
2091
2092 // Find the interleaved binary ops.
2093 auto Opc = BO.getOpcode();
2094 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2095 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2096 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2097 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2098 !BO0->isAssociative() || !BO1->isAssociative() ||
2099 BO0->getParent() != BO1->getParent())
2100 return nullptr;
2101
2102 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2103 "Expected commutative instructions!");
2104
2105 // Find the matching phis, forming the recurrences.
2106 PHINode *PN0, *PN1;
2107 Value *Start0, *Step0, *Start1, *Step1;
2108 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2109 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2110 PN0->getParent() != PN1->getParent())
2111 return nullptr;
2112
2113 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2114 "Expected PHIs with two incoming values!");
2115
2116 // Convert the start and step values to constants.
2117 auto *Init0 = dyn_cast<Constant>(Start0);
2118 auto *Init1 = dyn_cast<Constant>(Start1);
2119 auto *C0 = dyn_cast<Constant>(Step0);
2120 auto *C1 = dyn_cast<Constant>(Step1);
2121 if (!Init0 || !Init1 || !C0 || !C1)
2122 return nullptr;
2123
2124 // Fold the recurrence constants.
2125 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2126 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2127 if (!Init || !C)
2128 return nullptr;
2129
2130 // Create the reduced PHI.
2131 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2132 "reduced.phi");
2133
2134 // Create the new binary op.
2135 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2136 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2137 // Intersect FMF flags for FADD and FMUL.
2138 FastMathFlags Intersect = BO0->getFastMathFlags() &
2139 BO1->getFastMathFlags() & BO.getFastMathFlags();
2140 NewBO->setFastMathFlags(Intersect);
2141 } else {
2142 OverflowTracking Flags;
2143 Flags.AllKnownNonNegative = false;
2144 Flags.AllKnownNonZero = false;
2145 Flags.mergeFlags(*BO0);
2146 Flags.mergeFlags(*BO1);
2147 Flags.mergeFlags(BO);
2148 Flags.applyFlags(*NewBO);
2149 }
2150 NewBO->takeName(&BO);
2151
2152 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2153 auto *V = PN0->getIncomingValue(I);
2154 auto *BB = PN0->getIncomingBlock(I);
2155 if (V == Init0) {
2156 assert(((PN1->getIncomingValue(0) == Init1 &&
2157 PN1->getIncomingBlock(0) == BB) ||
2158 (PN1->getIncomingValue(1) == Init1 &&
2159 PN1->getIncomingBlock(1) == BB)) &&
2160 "Invalid incoming block!");
2161 NewPN->addIncoming(Init, BB);
2162 } else if (V == BO0) {
2163 assert(((PN1->getIncomingValue(0) == BO1 &&
2164 PN1->getIncomingBlock(0) == BB) ||
2165 (PN1->getIncomingValue(1) == BO1 &&
2166 PN1->getIncomingBlock(1) == BB)) &&
2167 "Invalid incoming block!");
2168 NewPN->addIncoming(NewBO, BB);
2169 } else
2170 llvm_unreachable("Unexpected incoming value!");
2171 }
2172
2173 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2174 << "\n with " << *PN1 << "\n " << *BO1
2175 << '\n');
2176
2177 // Insert the new recurrence and remove the old (dead) ones.
2178 InsertNewInstWith(NewPN, PN0->getIterator());
2179 InsertNewInstWith(NewBO, BO0->getIterator());
2180
2187
2188 return replaceInstUsesWith(BO, NewBO);
2189}
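// Sketch of the recurrence fold (editor's illustration, back-edges implied):
//   %p0 = phi i32 [ 0, %entry ], [ %a0, %loop ]
//   %p1 = phi i32 [ 2, %entry ], [ %a1, %loop ]
//   %a0 = add i32 %p0, 1
//   %a1 = add i32 %p1, 3
//   %r  = add i32 %a0, %a1
// combines into a single recurrence:
//   %p = phi i32 [ 2, %entry ], [ %r, %loop ]    ; Init = 0 + 2
//   %r = add i32 %p, 4                           ; C    = 1 + 3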
2190
2191Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2192 // Attempt to fold binary operators whose operands are simple recurrences.
2193 if (auto *NewBO = foldBinopWithRecurrence(BO))
2194 return NewBO;
2195
2196 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2197 // we are guarding against replicating the binop in >1 predecessor.
2198 // This could miss matching a phi with 2 constant incoming values.
2199 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2200 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2201 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2202 Phi0->getNumOperands() != Phi1->getNumOperands())
2203 return nullptr;
2204
2205 // TODO: Remove the restriction for binop being in the same block as the phis.
2206 if (BO.getParent() != Phi0->getParent() ||
2207 BO.getParent() != Phi1->getParent())
2208 return nullptr;
2209
2210 // Fold if either phi has an incoming value that is the identity constant
2211 // of the binary operator: in each predecessor block, combining it with the
2212 // other phi's incoming value is then a no-op.
2213 // For example:
2214 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2215 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2216 // %add = add i32 %phi0, %phi1
2217 // ==>
2218 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2219 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2220 /*AllowRHSConstant*/ false);
2221 if (C) {
2222 SmallVector<Value *, 4> NewIncomingValues;
2223 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2224 auto &Phi0Use = std::get<0>(T);
2225 auto &Phi1Use = std::get<1>(T);
2226 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2227 return false;
2228 Value *Phi0UseV = Phi0Use.get();
2229 Value *Phi1UseV = Phi1Use.get();
2230 if (Phi0UseV == C)
2231 NewIncomingValues.push_back(Phi1UseV);
2232 else if (Phi1UseV == C)
2233 NewIncomingValues.push_back(Phi0UseV);
2234 else
2235 return false;
2236 return true;
2237 };
2238
2239 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2240 CanFoldIncomingValuePair)) {
2241 PHINode *NewPhi =
2242 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2243 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2244 "The number of collected incoming values should equal the number "
2245 "of the original PHINode operands!");
2246 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2247 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2248 return NewPhi;
2249 }
2250 }
2251
2252 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2253 return nullptr;
2254
2255 // Match a pair of incoming constants for one of the predecessor blocks.
2256 BasicBlock *ConstBB, *OtherBB;
2257 Constant *C0, *C1;
2258 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2259 ConstBB = Phi0->getIncomingBlock(0);
2260 OtherBB = Phi0->getIncomingBlock(1);
2261 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2262 ConstBB = Phi0->getIncomingBlock(1);
2263 OtherBB = Phi0->getIncomingBlock(0);
2264 } else {
2265 return nullptr;
2266 }
2267 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2268 return nullptr;
2269
2270 // The block that we are hoisting to must reach here unconditionally.
2271 // Otherwise, we could be speculatively executing an expensive or
2272 // non-speculatable op.
2273 auto *PredBlockBranch = dyn_cast<UncondBrInst>(OtherBB->getTerminator());
2274 if (!PredBlockBranch || !DT.isReachableFromEntry(OtherBB))
2275 return nullptr;
2276
2277 // TODO: This check could be tightened to only apply to binops (div/rem) that
2278 // are not safe to speculatively execute. But that could allow hoisting
2279 // potentially expensive instructions (fdiv for example).
2280 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2281 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2282 return nullptr;
2283
2284 // Fold constants for the predecessor block with constant incoming values.
2285 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2286 if (!NewC)
2287 return nullptr;
2288
2289 // Make a new binop in the predecessor block with the non-constant incoming
2290 // values.
2291 Builder.SetInsertPoint(PredBlockBranch);
2292 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2293 Phi0->getIncomingValueForBlock(OtherBB),
2294 Phi1->getIncomingValueForBlock(OtherBB));
2295 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2296 NotFoldedNewBO->copyIRFlags(&BO);
2297
2298 // Replace the binop with a phi of the new values. The old phis are dead.
2299 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2300 NewPhi->addIncoming(NewBO, OtherBB);
2301 NewPhi->addIncoming(NewC, ConstBB);
2302 return NewPhi;
2303}
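// The constant-pair case above, sketched (editor's illustration; %other.bb
// ends in an unconditional branch):
//   %phi0 = phi i32 [ 7, %const.bb ], [ %x, %other.bb ]
//   %phi1 = phi i32 [ 5, %const.bb ], [ %y, %other.bb ]
//   %r    = add i32 %phi0, %phi1
// becomes, with `%xy = add i32 %x, %y` hoisted into %other.bb:
//   %r = phi i32 [ %xy, %other.bb ], [ 12, %const.bb ]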
2304
2305Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2306 auto TryFoldOperand = [&](unsigned OpIdx,
2307 bool IsOtherParamConst) -> Instruction * {
2308 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(OpIdx)))
2309 return FoldOpIntoSelect(I, Sel, false, !IsOtherParamConst);
2310 if (auto *PN = dyn_cast<PHINode>(I.getOperand(OpIdx)))
2311 return foldOpIntoPhi(I, PN);
2312 return nullptr;
2313 };
2314
2315 if (Instruction *NewI =
2316 TryFoldOperand(/*OpIdx=*/0, isa<Constant>(I.getOperand(1))))
2317 return NewI;
2318 return TryFoldOperand(/*OpIdx=*/1, isa<Constant>(I.getOperand(0)));
2319}
2320
2321static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2322 // If this GEP has only 0 indices, it is the same pointer as
2323 // Src. If Src is not a trivial GEP too, don't combine
2324 // the indices.
2325 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2326 !Src.hasOneUse())
2327 return false;
2328 return true;
2329}
2330
2331/// Find a constant NewC that has property:
2332/// shuffle(NewC, ShMask) = C
2333/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2334///
2335/// A 1-to-1 mapping is not required. Example:
2336/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2337static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2338 VectorType *NewCTy) {
2339 if (isa<ScalableVectorType>(NewCTy)) {
2340 Constant *Splat = C->getSplatValue();
2341 if (!Splat)
2342 return nullptr;
2343 return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2344 }
2345
2346 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2347 cast<FixedVectorType>(C->getType())->getNumElements())
2348 return nullptr;
2349
2350 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2351 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2352 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2353 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2354 for (unsigned I = 0; I < NumElts; ++I) {
2355 Constant *CElt = C->getAggregateElement(I);
2356 if (ShMask[I] >= 0) {
2357 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2358 Constant *NewCElt = NewVecC[ShMask[I]];
2359 // Bail out if:
2360 // 1. The constant vector contains a constant expression.
2361 // 2. The shuffle needs an element of the constant vector that can't
2362 // be mapped to a new constant vector.
2363 // 3. This is a widening shuffle that copies elements of V1 into the
2364 // extended elements (extending with poison is allowed).
2365 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2366 I >= NewCNumElts)
2367 return nullptr;
2368 NewVecC[ShMask[I]] = CElt;
2369 }
2370 }
2371 return ConstantVector::get(NewVecC);
2372}
2373
2374// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
2375static Constant *constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector,
2376 Constant *Splat, bool SplatLHS,
2377 const DataLayout &DL) {
2378 ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
2379 Constant *LHS = ConstantVector::getSplat(EC, Splat);
2380 Constant *RHS = Vector;
2381 if (!SplatLHS)
2382 std::swap(LHS, RHS);
2383 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
2384}
2385
2386template <Intrinsic::ID SpliceID>
2388 InstCombiner::BuilderTy &Builder) {
2389 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2390 auto CreateBinOpSplice = [&](Value *X, Value *Y, Value *Offset) {
2391 Value *V = Builder.CreateBinOp(Inst.getOpcode(), X, Y, Inst.getName());
2392 if (auto *BO = dyn_cast<BinaryOperator>(V))
2393 BO->copyIRFlags(&Inst);
2394 Module *M = Inst.getModule();
2395 Function *F = Intrinsic::getOrInsertDeclaration(M, SpliceID, V->getType());
2396 return CallInst::Create(F, {V, PoisonValue::get(V->getType()), Offset});
2397 };
2398 Value *V1, *V2, *Offset;
2399 if (match(LHS,
2400 m_Intrinsic<SpliceID>(m_Value(V1), m_Poison(), m_Value(Offset)))) {
2401 // Op(splice(V1, poison, offset), splice(V2, poison, offset))
2402 // -> splice(Op(V1, V2), poison, offset)
2403 if (match(RHS, m_Intrinsic<SpliceID>(m_Value(V2), m_Poison(),
2404 m_Specific(Offset))) &&
2405 (LHS->hasOneUse() || RHS->hasOneUse() ||
2406 (LHS == RHS && LHS->hasNUses(2))))
2407 return CreateBinOpSplice(V1, V2, Offset);
2408
2409 // Op(splice(V1, poison, offset), RHSSplat)
2410 // -> splice(Op(V1, RHSSplat), poison, offset)
2411 if (LHS->hasOneUse() && isSplatValue(RHS))
2412 return CreateBinOpSplice(V1, RHS, Offset);
2413 }
2414 // Op(LHSSplat, splice(V2, poison, offset))
2415 // -> splice(Op(LHSSplat, V2), poison, offset)
2416 else if (isSplatValue(LHS) &&
2417 match(RHS, m_OneUse(m_Intrinsic<SpliceID>(m_Value(V2), m_Poison(),
2418 m_Value(Offset)))))
2419 return CreateBinOpSplice(LHS, V2, Offset);
2420
2421 // TODO: Fold binops of the form
2422 // Op(splice(poison, V1, offset), splice(poison, V2, offset))
2423 // -> splice(poison, Op(V1, V2), offset)
2424
2425 return nullptr;
2426}
2427
2428Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2429 if (!isa<VectorType>(Inst.getType()))
2430 return nullptr;
2431
2432 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2433 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2434 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2435 cast<VectorType>(Inst.getType())->getElementCount());
2436 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2437 cast<VectorType>(Inst.getType())->getElementCount());
2438
2439 auto foldConstantsThroughSubVectorInsertSplat =
2440 [&](Value *MaybeSubVector, Value *MaybeSplat,
2441 bool SplatLHS) -> Instruction * {
2442 Value *Idx;
2443 Constant *Splat, *SubVector, *Dest;
2444 if (!match(MaybeSplat, m_ConstantSplat(m_Constant(Splat))) ||
2445 !match(MaybeSubVector,
2446 m_VectorInsert(m_Constant(Dest), m_Constant(SubVector),
2447 m_Value(Idx))))
2448 return nullptr;
2449 SubVector =
2450 constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
2451 Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
2452 if (!SubVector || !Dest)
2453 return nullptr;
2454 auto *InsertVector =
2455 Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, Idx);
2456 return replaceInstUsesWith(Inst, InsertVector);
2457 };
2458
2459 // If one operand is a constant splat and the other operand is a
2460 // `vector.insert` where both the destination and subvector are constant,
2461 // apply the operation to both the destination and subvector, returning a new
2462 // constant `vector.insert`. This helps constant folding for scalable vectors.
2463 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2464 /*MaybeSubVector=*/LHS, /*MaybeSplat=*/RHS, /*SplatLHS=*/false))
2465 return Folded;
2466 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2467 /*MaybeSubVector=*/RHS, /*MaybeSplat=*/LHS, /*SplatLHS=*/true))
2468 return Folded;
2469
2470 // If both operands of the binop are vector concatenations, then perform the
2471 // narrow binop on each pair of the source operands followed by concatenation
2472 // of the results.
2473 Value *L0, *L1, *R0, *R1;
2474 ArrayRef<int> Mask;
2475 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2476 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2477 LHS->hasOneUse() && RHS->hasOneUse() &&
2478 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2479 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2480 // This transform does not have the speculative execution constraint as
2481 // below because the shuffle is a concatenation. The new binops are
2482 // operating on exactly the same elements as the existing binop.
2483 // TODO: We could ease the mask requirement to allow different undef lanes,
2484 // but that requires an analysis of the binop-with-undef output value.
2485 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2486 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2487 BO->copyIRFlags(&Inst);
2488 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2489 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2490 BO->copyIRFlags(&Inst);
2491 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2492 }
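  // e.g. (editor's illustration): with Mask = <0,1,2,3> concatenating two
  // <2 x i32> halves, `add (concat L0, L1), (concat R0, R1)` becomes
  // `concat (add L0, R0), (add L1, R1)`.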
2493
2494 auto createBinOpReverse = [&](Value *X, Value *Y) {
2495 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2496 if (auto *BO = dyn_cast<BinaryOperator>(V))
2497 BO->copyIRFlags(&Inst);
2498 Module *M = Inst.getModule();
2499 Function *F = Intrinsic::getOrInsertDeclaration(
2500 M, Intrinsic::vector_reverse, V->getType());
2501 return CallInst::Create(F, V);
2502 };
2503
2504 // NOTE: Reverse shuffles don't require the speculative execution protection
2505 // below because they don't affect which lanes take part in the computation.
2506
2507 Value *V1, *V2;
2508 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2509 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2510 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2511 (LHS->hasOneUse() || RHS->hasOneUse() ||
2512 (LHS == RHS && LHS->hasNUses(2))))
2513 return createBinOpReverse(V1, V2);
2514
2515 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2516 if (LHS->hasOneUse() && isSplatValue(RHS))
2517 return createBinOpReverse(V1, RHS);
2518 }
2519 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2520 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2521 return createBinOpReverse(LHS, V2);
2522
2523 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2524 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2525 if (auto *BO = dyn_cast<BinaryOperator>(V))
2526 BO->copyIRFlags(&Inst);
2527
2528 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2529 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2530 Module *M = Inst.getModule();
2531 Function *F = Intrinsic::getOrInsertDeclaration(
2532 M, Intrinsic::experimental_vp_reverse, V->getType());
2533 return CallInst::Create(F, {V, AllTrueMask, EVL});
2534 };
2535
2536 Value *EVL;
2537 if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2538 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2539 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2540 if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2541 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2542 (LHS->hasOneUse() || RHS->hasOneUse() ||
2543 (LHS == RHS && LHS->hasNUses(2))))
2544 return createBinOpVPReverse(V1, V2, EVL);
2545
2546 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2547 if (LHS->hasOneUse() && isSplatValue(RHS))
2548 return createBinOpVPReverse(V1, RHS, EVL);
2549 }
2550 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2551 else if (isSplatValue(LHS) &&
2552 match(RHS, m_OneUse(m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2553 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2554 return createBinOpVPReverse(LHS, V2, EVL);
2555
2556 if (Instruction *Folded =
2558 return Folded;
2559 if (Instruction *Folded =
2561 return Folded;
2562
2563 // It may not be safe to reorder shuffles and things like div, urem, etc.
2564 // because we may trap when executing those ops on unknown vector elements.
2565 // See PR20059.
2566 if (!isSafeToSpeculativelyExecute(&Inst))
2567 return nullptr;
2568
2569 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2570 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2571 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2572 BO->copyIRFlags(&Inst);
2573 return new ShuffleVectorInst(XY, M);
2574 };
2575
2576 // If both arguments of the binary operation are shuffles that use the same
2577 // mask and shuffle within a single vector, move the shuffle after the binop.
2578 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2579 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2580 V1->getType() == V2->getType() &&
2581 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2582 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2583 return createBinOpShuffle(V1, V2, Mask);
2584 }
2585
2586 // If both arguments of a commutative binop are select-shuffles that use the
2587 // same mask with commuted operands, the shuffles are unnecessary.
2588 if (Inst.isCommutative() &&
2589 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2590 match(RHS,
2591 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2592 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2593 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2594 // TODO: Allow shuffles that contain undefs in the mask?
2595 // That is legal, but it reduces undef knowledge.
2596 // TODO: Allow arbitrary shuffles by shuffling after binop?
2597 // That might be legal, but we have to deal with poison.
2598 if (LShuf->isSelect() &&
2599 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2600 RShuf->isSelect() &&
2601 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2602 // Example:
2603 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2604 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2605 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2606 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2607 NewBO->copyIRFlags(&Inst);
2608 return NewBO;
2609 }
2610 }
2611
2612 // If one argument is a shuffle within one vector and the other is a constant,
2613 // try moving the shuffle after the binary operation. This canonicalization
2614 // intends to move shuffles closer to other shuffles and binops closer to
2615 // other binops, so they can be folded. It may also enable demanded elements
2616 // transforms.
2617 Constant *C;
2618 if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2619 m_Mask(Mask))),
2620 m_ImmConstant(C)))) {
2621 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2622 "Shuffle should not change scalar type");
2623
2624 bool ConstOp1 = isa<Constant>(RHS);
2625 if (Constant *NewC =
2626 unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2627 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2628 // which will cause UB for div/rem. Mask them with a safe constant.
2629 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2630 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2631
2632 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2633 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2634 Value *NewLHS = ConstOp1 ? V1 : NewC;
2635 Value *NewRHS = ConstOp1 ? NewC : V1;
2636 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2637 }
2638 }
2639
2640 // Try to reassociate to sink a splat shuffle after a binary operation.
2641 if (Inst.isAssociative() && Inst.isCommutative()) {
2642 // Canonicalize shuffle operand as LHS.
2643 if (isa<ShuffleVectorInst>(RHS))
2644 std::swap(LHS, RHS);
2645
2646 Value *X;
2647 ArrayRef<int> MaskC;
2648 int SplatIndex;
2649 Value *Y, *OtherOp;
2650 if (!match(LHS,
2651 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2652 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2653 X->getType() != Inst.getType() ||
2654 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2655 return nullptr;
2656
2657 // FIXME: This may not be safe if the analysis allows undef elements. By
2658 // moving 'Y' before the splat shuffle, we are implicitly assuming
2659 // that it is not undef/poison at the splat index.
2660 if (isSplatValue(OtherOp, SplatIndex)) {
2661 std::swap(Y, OtherOp);
2662 } else if (!isSplatValue(Y, SplatIndex)) {
2663 return nullptr;
2664 }
2665
2666 // X and Y are splatted values, so perform the binary operation on those
2667 // values followed by a splat followed by the 2nd binary operation:
2668 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2669 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2670 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2671 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2672 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2673
2674 // Intersect FMF on both new binops. Other (poison-generating) flags are
2675 // dropped to be safe.
2676 if (isa<FPMathOperator>(R)) {
2677 R->copyFastMathFlags(&Inst);
2678 R->andIRFlags(RHS);
2679 }
2680 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2681 NewInstBO->copyIRFlags(R);
2682 return R;
2683 }
2684
2685 return nullptr;
2686}
2687
2688/// Try to narrow the width of a binop if at least 1 operand is an extend of
2689 /// a value. This requires a potentially expensive known bits check to make
2690/// sure the narrow op does not overflow.
2691Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2692 // We need at least one extended operand.
2693 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2694
2695 // If this is a sub, we swap the operands since we always want an extension
2696 // on the RHS. The LHS can be an extension or a constant.
2697 if (BO.getOpcode() == Instruction::Sub)
2698 std::swap(Op0, Op1);
2699
2700 Value *X;
2701 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2702 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2703 return nullptr;
2704
2705 // If both operands are the same extension from the same source type and we
2706 // can eliminate at least one (hasOneUse), this might work.
2707 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2708 Value *Y;
2709 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2710 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2711 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2712 // If that did not match, see if we have a suitable constant operand.
2713 // Truncating and extending must produce the same constant.
2714 Constant *WideC;
2715 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2716 return nullptr;
2717 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2718 if (!NarrowC)
2719 return nullptr;
2720 Y = NarrowC;
2721 }
2722
2723 // Swap back now that we found our operands.
2724 if (BO.getOpcode() == Instruction::Sub)
2725 std::swap(X, Y);
2726
2727 // Both operands have narrow versions. Last step: the math must not overflow
2728 // in the narrow width.
2729 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2730 return nullptr;
2731
2732 // bo (ext X), (ext Y) --> ext (bo X, Y)
2733 // bo (ext X), C --> ext (bo X, C')
2734 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2735 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2736 if (IsSext)
2737 NewBinOp->setHasNoSignedWrap();
2738 else
2739 NewBinOp->setHasNoUnsignedWrap();
2740 }
2741 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2742}
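// Sketch (editor's illustration; assumes known bits prove the narrow add
// cannot wrap, e.g. both bytes have their top bit clear):
//   %xw = zext i8 %x to i32
//   %yw = zext i8 %y to i32
//   %r  = add i32 %xw, %yw
// becomes
//   %n = add nuw i8 %x, %y
//   %r = zext i8 %n to i32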
2743
2744/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2745/// transform.
2746static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2747 GEPOperator &GEP2) {
2748 return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2749}
2750
2751/// Thread a GEP operation with constant indices through the constant true/false
2752/// arms of a select.
2753static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2754 InstCombiner::BuilderTy &Builder) {
2755 if (!GEP.hasAllConstantIndices())
2756 return nullptr;
2757
2758 Instruction *Sel;
2759 Value *Cond;
2760 Constant *TrueC, *FalseC;
2761 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2762 !match(Sel,
2763 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2764 return nullptr;
2765
2766 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2767 // Propagate 'inbounds' and metadata from existing instructions.
2768 // Note: using IRBuilder to create the constants for efficiency.
2769 SmallVector<Value *, 4> IndexC(GEP.indices());
2770 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2771 Type *Ty = GEP.getSourceElementType();
2772 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2773 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2774 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2775}
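// For example (editor's illustration; @a and @b are hypothetical globals):
//   %p = select i1 %c, ptr @a, ptr @b
//   %g = getelementptr inbounds i32, ptr %p, i64 4
// becomes a select between the two constant-folded GEPs over @a and @b.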
2776
2777// Canonicalization:
2778// gep T, (gep i8, base, C1), (Index + C2) into
2779// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
2780static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2781 GEPOperator *Src,
2782 InstCombinerImpl &IC) {
2783 if (GEP.getNumIndices() != 1)
2784 return nullptr;
2785 auto &DL = IC.getDataLayout();
2786 Value *Base;
2787 const APInt *C1;
2788 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2789 return nullptr;
2790 Value *VarIndex;
2791 const APInt *C2;
2792 Type *PtrTy = Src->getType()->getScalarType();
2793 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2794 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2795 return nullptr;
2796 if (C1->getBitWidth() != IndexSizeInBits ||
2797 C2->getBitWidth() != IndexSizeInBits)
2798 return nullptr;
2799 Type *BaseType = GEP.getSourceElementType();
2800 if (isa<ScalableVectorType>(BaseType))
2801 return nullptr;
2802 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2803 APInt NewOffset = TypeSize * *C2 + *C1;
2804 if (NewOffset.isZero() ||
2805 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2806 GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2807 if (GEP.hasNoUnsignedWrap() &&
2808 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2809 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2810 Flags |= GEPNoWrapFlags::noUnsignedWrap();
2811 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2812 Flags |= GEPNoWrapFlags::inBounds();
2813 }
2814
2815 Value *GEPConst =
2816 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2817 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2818 }
2819
2820 return nullptr;
2821}
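// Worked instance (editor's illustration):
//   gep i32, (gep i8, %base, 4), (add %x, 2)
// has NewOffset = 4 + 2 * sizeof(i32) = 12 and canonicalizes to
//   gep i32, (gep i8, %base, 12), %x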
2822
2823/// Combine constant offsets separated by variable offsets.
2824/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
2825static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2826 InstCombinerImpl &IC) {
2827 if (!GEP.hasAllConstantIndices())
2828 return nullptr;
2829
2830 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2831 SmallVector<GetElementPtrInst *> Skipped;
2832 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2833 while (true) {
2834 if (!InnerGEP)
2835 return nullptr;
2836
2837 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2838 if (InnerGEP->hasAllConstantIndices())
2839 break;
2840
2841 if (!InnerGEP->hasOneUse())
2842 return nullptr;
2843
2844 Skipped.push_back(InnerGEP);
2845 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2846 }
2847
2848 // The two constant offset GEPs are directly adjacent: Let normal offset
2849 // merging handle it.
2850 if (Skipped.empty())
2851 return nullptr;
2852
2853 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2854 // if profitable.
2855 if (!InnerGEP->hasOneUse())
2856 return nullptr;
2857
2858 // Don't bother with vector splats.
2859 Type *Ty = GEP.getType();
2860 if (InnerGEP->getType() != Ty)
2861 return nullptr;
2862
2863 const DataLayout &DL = IC.getDataLayout();
2864 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2865 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2866 !InnerGEP->accumulateConstantOffset(DL, Offset))
2867 return nullptr;
2868
2869 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2870 for (GetElementPtrInst *SkippedGEP : Skipped)
2871 SkippedGEP->setNoWrapFlags(NW);
2872
2873 return IC.replaceInstUsesWith(
2874 GEP,
2875 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2876 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2877}
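// e.g. (editor's illustration):
//   ptradd (ptradd (ptradd %p, 4), %x), 8
// accumulates the constants into
//   ptradd (ptradd %p, %x), 12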
2878
2879Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2880 GEPOperator *Src) {
2881 // Combine Indices - If the source pointer to this getelementptr instruction
2882 // is a getelementptr instruction with matching element type, combine the
2883 // indices of the two getelementptr instructions into a single instruction.
2884 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2885 return nullptr;
2886
2887 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2888 return I;
2889
2890 if (auto *I = combineConstantOffsets(GEP, *this))
2891 return I;
2892
2893 if (Src->getResultElementType() != GEP.getSourceElementType())
2894 return nullptr;
2895
2896 // Fold chained GEP with constant base into single GEP:
2897 // gep i8, (gep i8, %base, C1), (select Cond, C2, C3)
2898 // -> gep i8, %base, (select Cond, C1+C2, C1+C3)
2899 if (Src->hasOneUse() && GEP.getNumIndices() == 1 &&
2900 Src->getNumIndices() == 1) {
2901 Value *SrcIdx = *Src->idx_begin();
2902 Value *GEPIdx = *GEP.idx_begin();
2903 const APInt *ConstOffset, *TrueVal, *FalseVal;
2904 Value *Cond;
2905
2906 if ((match(SrcIdx, m_APInt(ConstOffset)) &&
2907 match(GEPIdx,
2908 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal)))) ||
2909 (match(GEPIdx, m_APInt(ConstOffset)) &&
2910 match(SrcIdx,
2911 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal))))) {
2912 auto *Select = isa<SelectInst>(GEPIdx) ? cast<SelectInst>(GEPIdx)
2913 : cast<SelectInst>(SrcIdx);
2914
2915 // Make sure the select has only one use.
2916 if (!Select->hasOneUse())
2917 return nullptr;
2918
2919 if (TrueVal->getBitWidth() != ConstOffset->getBitWidth() ||
2920 FalseVal->getBitWidth() != ConstOffset->getBitWidth())
2921 return nullptr;
2922
2923 APInt NewTrueVal = *ConstOffset + *TrueVal;
2924 APInt NewFalseVal = *ConstOffset + *FalseVal;
2925 Constant *NewTrue = ConstantInt::get(Select->getType(), NewTrueVal);
2926 Constant *NewFalse = ConstantInt::get(Select->getType(), NewFalseVal);
2927 Value *NewSelect = Builder.CreateSelect(
2928 Cond, NewTrue, NewFalse, /*Name=*/"",
2929 /*MDFrom=*/(ProfcheckDisableMetadataFixes ? nullptr : Select));
2930 GEPNoWrapFlags Flags =
2931 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2932 return replaceInstUsesWith(GEP,
2933 Builder.CreateGEP(GEP.getResultElementType(),
2934 Src->getPointerOperand(),
2935 NewSelect, "", Flags));
2936 }
2937 }
2938
2939 // Find out whether the last index in the source GEP is a sequential idx.
2940 bool EndsWithSequential = false;
2941 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2942 I != E; ++I)
2943 EndsWithSequential = I.isSequential();
2944 if (!EndsWithSequential)
2945 return nullptr;
2946
2947 // Replace: gep (gep %P, long B), long A, ...
2948 // With: T = long A+B; gep %P, T, ...
2949 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2950 Value *GO1 = GEP.getOperand(1);
2951
2952 // If they aren't the same type, then the input hasn't been processed
2953 // by the loop above yet (which canonicalizes sequential index types to
2954 // intptr_t). Just avoid transforming this until the input has been
2955 // normalized.
2956 if (SO1->getType() != GO1->getType())
2957 return nullptr;
2958
2959 Value *Sum =
2960 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2961 // Only do the combine when we are sure the cost after the
2962 // merge is never more than that before the merge.
2963 if (Sum == nullptr)
2964 return nullptr;
2965
2966 SmallVector<Value *, 8> Indices;
2967 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2968 Indices.push_back(Sum);
2969 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2970
2971 // Don't create GEPs with more than one non-zero index.
2972 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2973 auto *C = dyn_cast<Constant>(Idx);
2974 return !C || !C->isNullValue();
2975 });
2976 if (NumNonZeroIndices > 1)
2977 return nullptr;
2978
2979 return replaceInstUsesWith(
2980 GEP, Builder.CreateGEP(
2981 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2982 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2983}
2984
2985Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2986 BuilderTy *Builder,
2987 bool &DoesConsume, unsigned Depth) {
2988 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2989 // ~(~(X)) -> X.
2990 Value *A, *B;
2991 if (match(V, m_Not(m_Value(A)))) {
2992 DoesConsume = true;
2993 return A;
2994 }
2995
2996 Constant *C;
2997 // Constants can be considered to be not'ed values.
2998 if (match(V, m_ImmConstant(C)))
2999 return ConstantExpr::getNot(C);
3000
3001 if (Depth++ >= MaxAnalysisRecursionDepth)
3002 return nullptr;
3003
3004 // The rest of the cases require that we invert all uses so don't bother
3005 // doing the analysis if we know we can't use the result.
3006 if (!WillInvertAllUses)
3007 return nullptr;
3008
3009 // Compares can be inverted if all of their uses are being modified to use
3010 // the ~V.
3011 if (auto *I = dyn_cast<CmpInst>(V)) {
3012 if (Builder != nullptr)
3013 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
3014 I->getOperand(1));
3015 return NonNull;
3016 }
3017
3018 // If `V` is of the form `A + B` then `-1 - V` can be folded into
3019 // `(-1 - B) - A` if we are willing to invert all of the uses.
3020 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
3021 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3022 DoesConsume, Depth))
3023 return Builder ? Builder->CreateSub(BV, A) : NonNull;
3024 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3025 DoesConsume, Depth))
3026 return Builder ? Builder->CreateSub(AV, B) : NonNull;
3027 return nullptr;
3028 }
3029
3030 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
3031 // into `A ^ B` if we are willing to invert all of the uses.
3032 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
3033 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3034 DoesConsume, Depth))
3035 return Builder ? Builder->CreateXor(A, BV) : NonNull;
3036 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3037 DoesConsume, Depth))
3038 return Builder ? Builder->CreateXor(AV, B) : NonNull;
3039 return nullptr;
3040 }
3041
3042 // If `V` is of the form `B - A` then `-1 - V` can be folded into
3043 // `A + (-1 - B)` if we are willing to invert all of the uses.
3044 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
3045 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3046 DoesConsume, Depth))
3047 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
3048 return nullptr;
3049 }
3050
3051 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
3052 // into `A s>> B` if we are willing to invert all of the uses.
3053 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
3054 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3055 DoesConsume, Depth))
3056 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
3057 return nullptr;
3058 }
3059
3060 Value *Cond;
3061 // LogicOps are special in that we canonicalize them at the cost of an
3062 // instruction.
3063 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
3064 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
3065 // Selects/min/max with invertible operands are freely invertible
3066 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
3067 bool LocalDoesConsume = DoesConsume;
3068 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
3069 LocalDoesConsume, Depth))
3070 return nullptr;
3071 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3072 LocalDoesConsume, Depth)) {
3073 DoesConsume = LocalDoesConsume;
3074 if (Builder != nullptr) {
3075 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3076 DoesConsume, Depth);
3077 assert(NotB != nullptr &&
3078 "Unable to build inverted value for known freely invertable op");
3079 if (auto *II = dyn_cast<IntrinsicInst>(V))
3080 return Builder->CreateBinaryIntrinsic(
3081 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
3082 return Builder->CreateSelect(
3083 Cond, NotA, NotB, "",
3084 ProfcheckDisableMetadataFixes ? nullptr : cast<SelectInst>(V));
3085 }
3086 return NonNull;
3087 }
3088 }
3089
3090 if (PHINode *PN = dyn_cast<PHINode>(V)) {
3091 bool LocalDoesConsume = DoesConsume;
3092 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
3093 for (Use &U : PN->operands()) {
3094 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3095 Value *NewIncomingVal = getFreelyInvertedImpl(
3096 U.get(), /*WillInvertAllUses=*/false,
3097 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
3098 if (NewIncomingVal == nullptr)
3099 return nullptr;
3100 // Make sure that we can safely erase the original PHI node.
3101 if (NewIncomingVal == V)
3102 return nullptr;
3103 if (Builder != nullptr)
3104 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
3105 }
3106
3107 DoesConsume = LocalDoesConsume;
3108 if (Builder != nullptr) {
3109 IRBuilderBase::InsertPointGuard Guard(*Builder);
3110 Builder->SetInsertPoint(PN);
3111 PHINode *NewPN =
3112 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3113 for (auto [Val, Pred] : IncomingValues)
3114 NewPN->addIncoming(Val, Pred);
3115 return NewPN;
3116 }
3117 return NonNull;
3118 }
3119
3120 if (match(V, m_SExtLike(m_Value(A)))) {
3121 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3122 DoesConsume, Depth))
3123 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3124 return nullptr;
3125 }
3126
3127 if (match(V, m_Trunc(m_Value(A)))) {
3128 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3129 DoesConsume, Depth))
3130 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3131 return nullptr;
3132 }
3133
3134 // De Morgan's Laws:
3135 // (~(A | B)) -> (~A & ~B)
3136 // (~(A & B)) -> (~A | ~B)
3137 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3138 bool IsLogical, Value *A,
3139 Value *B) -> Value * {
3140 bool LocalDoesConsume = DoesConsume;
3141 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3142 LocalDoesConsume, Depth))
3143 return nullptr;
3144 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3145 LocalDoesConsume, Depth)) {
3146 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3147 LocalDoesConsume, Depth);
3148 DoesConsume = LocalDoesConsume;
3149 if (IsLogical)
3150 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3151 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3152 }
3153
3154 return nullptr;
3155 };
3156
3157 if (match(V, m_Or(m_Value(A), m_Value(B))))
3158 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3159 B);
3160
3161 if (match(V, m_And(m_Value(A), m_Value(B))))
3162 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3163 B);
3164
3165 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3166 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3167 B);
3168
3169 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3170 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3171 B);
3172
3173 return nullptr;
3174}
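// e.g. (editor's illustration): ~(A + B) is rebuilt as (-1 - B) - A when ~B
// is free, and ~(A | B) becomes ~A & ~B when both inverses are free.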
3175
3176/// Return true if we should canonicalize the gep to an i8 ptradd.
3178 Value *PtrOp = GEP.getOperand(0);
3179 Type *GEPEltType = GEP.getSourceElementType();
3180 if (GEPEltType->isIntegerTy(8))
3181 return false;
3182
3183 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3184 // intrinsic. This has better support in BasicAA.
3185 if (GEPEltType->isScalableTy())
3186 return true;
3187
3188 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3189 // together.
3190 if (GEP.getNumIndices() == 1 &&
3191 match(GEP.getOperand(1),
3192 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
3193 m_Shl(m_Value(), m_ConstantInt())))))
3194 return true;
3195
3196 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3197 // possibly be merged together.
3198 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3199 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3200 any_of(GEP.indices(), [](Value *V) {
3201 const APInt *C;
3202 return match(V, m_APInt(C)) && !C->isZero();
3203 });
3204}
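// e.g. (editor's illustration): `gep i32, %p, (mul %o, 4)` is canonicalized
// to `gep i8, %p, (mul %o, 16)` so the two scalings fold into one multiply.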
3205
3206static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3207 IRBuilderBase &Builder) {
3208 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3209 if (!Op1)
3210 return nullptr;
3211
3212 // Don't fold a GEP into itself through a PHI node. This can only happen
3213 // through the back-edge of a loop. Folding a GEP into itself means that
3214 // the value of the previous iteration needs to be stored in the meantime,
3215 // thus requiring an additional register variable to be live, but not
3216 // actually achieving anything (the GEP still needs to be executed once per
3217 // loop iteration).
3218 if (Op1 == &GEP)
3219 return nullptr;
3220 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3221
3222 int DI = -1;
3223
3224 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3225 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3226 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3227 Op1->getSourceElementType() != Op2->getSourceElementType())
3228 return nullptr;
3229
3230 // As for Op1 above, don't try to fold a GEP into itself.
3231 if (Op2 == &GEP)
3232 return nullptr;
3233
3234 // Keep track of the type as we walk the GEP.
3235 Type *CurTy = nullptr;
3236
3237 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3238 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3239 return nullptr;
3240
3241 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3242 if (DI == -1) {
3243 // We have not seen any differences yet in the GEPs feeding the
3244 // PHI yet, so we record this one if it is allowed to be a
3245 // variable.
3246
3247 // The first two arguments can vary for any GEP, the rest have to be
3248 // static for struct slots
3249 if (J > 1) {
3250 assert(CurTy && "No current type?");
3251 if (CurTy->isStructTy())
3252 return nullptr;
3253 }
3254
3255 DI = J;
3256 } else {
3257 // The GEP is different by more than one input. While this could be
3258 // extended to support GEPs that vary by more than one variable it
3259 // doesn't make sense since it greatly increases the complexity and
3260 // would result in an R+R+R addressing mode which no backend
3261 // directly supports and would need to be broken into several
3262 // simpler instructions anyway.
3263 return nullptr;
3264 }
3265 }
3266
3267 // Sink down a layer of the type for the next iteration.
3268 if (J > 0) {
3269 if (J == 1) {
3270 CurTy = Op1->getSourceElementType();
3271 } else {
3272 CurTy =
3273 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3274 }
3275 }
3276 }
3277
3278 NW &= Op2->getNoWrapFlags();
3279 }
3280
3281 // If not all GEPs are identical we'll have to create a new PHI node.
3282 // Check that the old PHI node has only one use so that it will get
3283 // removed.
3284 if (DI != -1 && !PN->hasOneUse())
3285 return nullptr;
3286
3287 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3288 NewGEP->setNoWrapFlags(NW);
3289
3290 if (DI == -1) {
3291 // All the GEPs feeding the PHI are identical. Clone one down into our
3292 // BB so that it can be merged with the current GEP.
3293 } else {
3294 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3295 // into the current block so it can be merged, and create a new PHI to
3296 // set that index.
3297 PHINode *NewPN;
3298 {
3299 IRBuilderBase::InsertPointGuard Guard(Builder);
3300 Builder.SetInsertPoint(PN);
3301 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3302 PN->getNumOperands());
3303 }
3304
3305 for (auto &I : PN->operands())
3306 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3307 PN->getIncomingBlock(I));
3308
3309 NewGEP->setOperand(DI, NewPN);
3310 }
3311
3312 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3313 return NewGEP;
3314}
3315
3316 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3317 Value *PtrOp = GEP.getOperand(0);
3318 SmallVector<Value *, 8> Indices(GEP.indices());
3319 Type *GEPType = GEP.getType();
3320 Type *GEPEltType = GEP.getSourceElementType();
3321 if (Value *V =
3322 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3323 SQ.getWithInstruction(&GEP)))
3324 return replaceInstUsesWith(GEP, V);
3325
3326 // For vector geps, use the generic demanded vector support.
3327 // Skip if GEP return type is scalable. The number of elements is unknown at
3328 // compile-time.
3329 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3330 auto VWidth = GEPFVTy->getNumElements();
3331 APInt PoisonElts(VWidth, 0);
3332 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3333 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3334 PoisonElts)) {
3335 if (V != &GEP)
3336 return replaceInstUsesWith(GEP, V);
3337 return &GEP;
3338 }
3339 }
3340
3341 // Eliminate unneeded casts for indices, and replace indices which displace
3342 // by multiples of a zero size type with zero.
3343 bool MadeChange = false;
3344
3345 // Index width may not be the same width as pointer width.
3346 // Data layout chooses the right type based on supported integer types.
3347 Type *NewScalarIndexTy =
3348 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3349
3350   gep_type_iterator GTI = gep_type_begin(GEP);
3351 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3352 ++I, ++GTI) {
3353 // Skip indices into struct types.
3354 if (GTI.isStruct())
3355 continue;
3356
3357 Type *IndexTy = (*I)->getType();
3358 Type *NewIndexType =
3359 IndexTy->isVectorTy()
3360 ? VectorType::get(NewScalarIndexTy,
3361 cast<VectorType>(IndexTy)->getElementCount())
3362 : NewScalarIndexTy;
3363
3364 // If the element type has zero size then any index over it is equivalent
3365 // to an index of zero, so replace it with zero if it is not zero already.
3366 Type *EltTy = GTI.getIndexedType();
3367 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3368 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3369 *I = Constant::getNullValue(NewIndexType);
3370 MadeChange = true;
3371 }
3372
3373 if (IndexTy != NewIndexType) {
3374 // If we are using a wider index than needed for this platform, shrink
3375 // it to what we need. If narrower, sign-extend it to what we need.
3376 // This explicit cast can make subsequent optimizations more obvious.
3377 if (IndexTy->getScalarSizeInBits() <
3378 NewIndexType->getScalarSizeInBits()) {
3379 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3380 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3381 else
3382 *I = Builder.CreateSExt(*I, NewIndexType);
3383 } else {
3384 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3385 GEP.hasNoUnsignedSignedWrap());
3386 }
3387 MadeChange = true;
3388 }
3389 }
3390 if (MadeChange)
3391 return &GEP;
3392
3393 // Canonicalize constant GEPs to i8 type.
3394 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3395 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3396 if (GEP.accumulateConstantOffset(DL, Offset))
3397 return replaceInstUsesWith(
3398 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3399 GEP.getNoWrapFlags()));
3400 }
3401
3402   if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
3403 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3404 Value *NewGEP =
3405 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3406 return replaceInstUsesWith(GEP, NewGEP);
3407 }
3408
3409 // Strip trailing zero indices.
3410 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3411 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3412 return replaceInstUsesWith(
3413 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3414 drop_end(Indices), "", GEP.getNoWrapFlags()));
3415 }
3416
3417 // Strip leading zero indices.
3418 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3419 if (FirstIdx && FirstIdx->isNullValue() &&
3420 !FirstIdx->getType()->isVectorTy()) {
3421     gep_type_iterator GTI = gep_type_begin(GEP);
3422 ++GTI;
3423 if (!GTI.isStruct() && GTI.getSequentialElementStride(DL) ==
3424 DL.getTypeAllocSize(GTI.getIndexedType()))
3425 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3426 GEP.getPointerOperand(),
3427 drop_begin(Indices), "",
3428 GEP.getNoWrapFlags()));
3429 }
3430
3431   // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3432   // Note that this loses information about undef lanes; we run it after
3433 // demanded bits to partially mitigate that loss.
3434 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3435 return Op->getType()->isVectorTy() && getSplatValue(Op);
3436 })) {
3437 SmallVector<Value *> NewOps;
3438 for (auto &Op : GEP.operands()) {
3439 if (Op->getType()->isVectorTy())
3440 if (Value *Scalar = getSplatValue(Op)) {
3441 NewOps.push_back(Scalar);
3442 continue;
3443 }
3444 NewOps.push_back(Op);
3445 }
3446
3447 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3448 ArrayRef(NewOps).drop_front(), GEP.getName(),
3449 GEP.getNoWrapFlags());
3450 if (!Res->getType()->isVectorTy()) {
3451 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3452 Res = Builder.CreateVectorSplat(EC, Res);
3453 }
3454 return replaceInstUsesWith(GEP, Res);
3455 }
3456
3457 bool SeenNonZeroIndex = false;
3458 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3459 // Ignore one leading zero index.
3460 auto *C = dyn_cast<Constant>(Idx);
3461 if (C && C->isNullValue() && IdxNum == 0)
3462 continue;
3463
3464 if (!SeenNonZeroIndex) {
3465 SeenNonZeroIndex = true;
3466 continue;
3467 }
3468
3469 // GEP has multiple non-zero indices: Split it.
3470 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3471 Value *FrontGEP =
3472 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3473 GEP.getName() + ".split", GEP.getNoWrapFlags());
3474
3475 SmallVector<Value *> BackIndices;
3476 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3477 append_range(BackIndices, drop_begin(Indices, IdxNum));
3478     return GetElementPtrInst::Create(
3479 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3480 BackIndices, GEP.getNoWrapFlags());
3481 }
3482
3483 // Canonicalize gep %T to gep [sizeof(%T) x i8]:
3484 auto IsCanonicalType = [](Type *Ty) {
3485 if (auto *AT = dyn_cast<ArrayType>(Ty))
3486 Ty = AT->getElementType();
3487 return Ty->isIntegerTy(8);
3488 };
3489 if (Indices.size() == 1 && !IsCanonicalType(GEPEltType)) {
3490 TypeSize Scale = DL.getTypeAllocSize(GEPEltType);
3491 assert(!Scale.isScalable() && "Should have been handled earlier");
3492 Type *NewElemTy = Builder.getInt8Ty();
3493 if (Scale.getFixedValue() != 1)
3494 NewElemTy = ArrayType::get(NewElemTy, Scale.getFixedValue());
3495 GEP.setSourceElementType(NewElemTy);
3496 GEP.setResultElementType(NewElemTy);
3497 // Don't bother revisiting the GEP after this change.
3498 MadeIRChange = true;
3499 }
3500
3501 // Check to see if the inputs to the PHI node are getelementptr instructions.
3502 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3503 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3504 return replaceOperand(GEP, 0, NewPtrOp);
3505 }
3506
3507 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3508 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3509 return I;
3510
3511 if (GEP.getNumIndices() == 1) {
3512 unsigned AS = GEP.getPointerAddressSpace();
3513 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3514 DL.getIndexSizeInBits(AS)) {
3515 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3516
3517 if (TyAllocSize == 1) {
3518 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3519 // but only if the result pointer is only used as if it were an integer.
3520 // (The case where the underlying object is the same is handled by
3521 // InstSimplify.)
3522 Value *X = GEP.getPointerOperand();
3523 Value *Y;
3524 if (match(GEP.getOperand(1), m_Sub(m_PtrToIntOrAddr(m_Value(Y)),
3525                                            m_PtrToIntOrAddr(m_Specific(X)))) &&
3526 GEPType == Y->getType()) {
3527 bool HasNonAddressBits =
3528 DL.getAddressSizeInBits(AS) != DL.getPointerSizeInBits(AS);
3529 bool Changed = GEP.replaceUsesWithIf(Y, [&](Use &U) {
3530 return isa<PtrToAddrInst, ICmpInst>(U.getUser()) ||
3531 (!HasNonAddressBits && isa<PtrToIntInst>(U.getUser()));
3532 });
3533 return Changed ? &GEP : nullptr;
3534 }
3535 } else if (auto *ExactIns =
3536 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3537 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3538 Value *V;
3539 if (ExactIns->isExact()) {
3540 if ((has_single_bit(TyAllocSize) &&
3541 match(GEP.getOperand(1),
3542 m_Shr(m_Value(V),
3543 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3544 match(GEP.getOperand(1),
3545 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3546 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3547 GEP.getPointerOperand(), V,
3548 GEP.getNoWrapFlags());
3549 }
3550 }
3551 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3552         // Try to canonicalize a non-i8 element type to i8 if the index is
3553         // an exact instruction (div/shr) with a constant RHS: we can fold
3554         // the non-i8 element scale into the div/shr (similar to the mul
3555         // case, just inverted).
3556 const APInt *C;
3557 std::optional<APInt> NewC;
3558 if (has_single_bit(TyAllocSize) &&
3559 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3560 C->uge(countr_zero(TyAllocSize)))
3561 NewC = *C - countr_zero(TyAllocSize);
3562 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3563 APInt Quot;
3564 uint64_t Rem;
3565 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3566 if (Rem == 0)
3567 NewC = Quot;
3568 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3569 APInt Quot;
3570 int64_t Rem;
3571 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3572           // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3573 if (!Quot.isAllOnes() && Rem == 0)
3574 NewC = Quot;
3575 }
3576
3577 if (NewC.has_value()) {
3578 Value *NewOp = Builder.CreateBinOp(
3579 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3580 ConstantInt::get(V->getType(), *NewC));
3581 cast<BinaryOperator>(NewOp)->setIsExact();
3582 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3583 GEP.getPointerOperand(), NewOp,
3584 GEP.getNoWrapFlags());
3585 }
3586 }
3587 }
3588 }
3589 }
3590 // We do not handle pointer-vector geps here.
3591 if (GEPType->isVectorTy())
3592 return nullptr;
3593
3594 if (!GEP.isInBounds()) {
3595 unsigned IdxWidth =
3596 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3597 APInt BasePtrOffset(IdxWidth, 0);
3598 Value *UnderlyingPtrOp =
3599 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3600 bool CanBeNull, CanBeFreed;
3601 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3602 DL, CanBeNull, CanBeFreed);
3603 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3604 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3605 BasePtrOffset.isNonNegative()) {
3606 APInt AllocSize(IdxWidth, DerefBytes);
3607 if (BasePtrOffset.ule(AllocSize)) {
3608           return GetElementPtrInst::CreateInBounds(
3609 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3610 }
3611 }
3612 }
3613 }
3614
3615 // nusw + nneg -> nuw
3616 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3617 all_of(GEP.indices(), [&](Value *Idx) {
3618 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3619 })) {
3620 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3621 return &GEP;
3622 }
3623
3624 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3625 // to do this after having tried to derive "nuw" above.
3626 if (GEP.getNumIndices() == 1) {
3627 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3628 // geps if transforming into (gep (gep p, x), y).
3629 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3630 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3631 // that x + y does not have unsigned wrap.
3632 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3633 return GEP.getNoWrapFlags();
3634 return GEPNoWrapFlags::none();
3635 };
3636
3637 // Try to replace ADD + GEP with GEP + GEP.
3638 Value *Idx1, *Idx2;
3639 if (match(GEP.getOperand(1),
3640 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3641 // %idx = add i64 %idx1, %idx2
3642 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3643 // as:
3644 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3645 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3646 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3647 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3648 auto *NewPtr =
3649 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3650 Idx1, "", NWFlags);
3651 return replaceInstUsesWith(GEP,
3652 Builder.CreateGEP(GEP.getSourceElementType(),
3653 NewPtr, Idx2, "", NWFlags));
3654 }
3655 ConstantInt *C;
3656 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3657 m_Value(Idx1), m_ConstantInt(C))))))) {
3658 // %add = add nsw i32 %idx1, idx2
3659 // %sidx = sext i32 %add to i64
3660 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3661 // as:
3662 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3663 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3664 bool NUW = match(GEP.getOperand(1),
3665                        m_NNegZExt(m_NUWAddLike(m_Value(), m_Value())));
3666 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3667 auto *NewPtr = Builder.CreateGEP(
3668 GEP.getSourceElementType(), GEP.getPointerOperand(),
3669 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3670 return replaceInstUsesWith(
3671 GEP,
3672 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3673 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3674 "", NWFlags));
3675 }
3676 }
3677
3678   if (Instruction *R = foldSelectGEP(GEP, Builder))
3679 return R;
3680
3681 // srem -> (and/urem) for inbounds+nuw GEP
3682 if (Indices.size() == 1 && GEP.isInBounds() && GEP.hasNoUnsignedWrap()) {
3683 Value *X, *Y;
3684
3685 // Match: idx = srem X, Y -- where Y is a power-of-two value.
3686 if (match(Indices[0], m_OneUse(m_SRem(m_Value(X), m_Value(Y)))) &&
3687 isKnownToBeAPowerOfTwo(Y, /*OrZero=*/true, &GEP)) {
3688 // If GEP is inbounds+nuw, the offset cannot be negative
3689 // -> srem by power-of-two can be treated as urem,
3690 // and urem by power-of-two folds to 'and' later.
3691 // OrZero=true is fine here because division by zero is UB.
3692 Instruction *OldIdxI = cast<Instruction>(Indices[0]);
3693 Value *NewIdx = Builder.CreateURem(X, Y, OldIdxI->getName());
3694
3695 return GetElementPtrInst::Create(GEPEltType, PtrOp, {NewIdx},
3696 GEP.getNoWrapFlags());
3697 }
3698 }
3699
3700 return nullptr;
3701}
3702
3703 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3704 Instruction *AI) {
3705   if (isa<ConstantPointerNull>(V))
3706 return true;
3707 if (auto *LI = dyn_cast<LoadInst>(V))
3708 return isa<GlobalVariable>(LI->getPointerOperand());
3709 // Two distinct allocations will never be equal.
3710 return isAllocLikeFn(V, &TLI) && V != AI;
3711}
3712
3713 /// Given a call CB which uses an address UsedV, return true if we can prove
3714 /// the call's only possible effect is storing to UsedV.
3715static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3716 const TargetLibraryInfo &TLI) {
3717 if (!CB.use_empty())
3718 // TODO: add recursion if returned attribute is present
3719 return false;
3720
3721 if (CB.isTerminator())
3722 // TODO: remove implementation restriction
3723 return false;
3724
3725 if (!CB.willReturn() || !CB.doesNotThrow())
3726 return false;
3727
3728 // If the only possible side effect of the call is writing to the alloca,
3729 // and the result isn't used, we can safely remove any reads implied by the
3730 // call including those which might read the alloca itself.
3731 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3732 return Dest && Dest->Ptr == UsedV;
3733}
3734
3735static std::optional<ModRefInfo>
3736 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3737 const TargetLibraryInfo &TLI, bool KnowInit) {
3738   SmallVector<Instruction*, 4> Worklist;
3739 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3740 Worklist.push_back(AI);
3741   ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Ref;
3742
3743 do {
3744 Instruction *PI = Worklist.pop_back_val();
3745 for (User *U : PI->users()) {
3746       Instruction *I = cast<Instruction>(U);
3747 switch (I->getOpcode()) {
3748 default:
3749 // Give up the moment we see something we can't handle.
3750 return std::nullopt;
3751
3752 case Instruction::AddrSpaceCast:
3753 case Instruction::BitCast:
3754 case Instruction::GetElementPtr:
3755 Users.emplace_back(I);
3756 Worklist.push_back(I);
3757 continue;
3758
3759 case Instruction::ICmp: {
3760 ICmpInst *ICI = cast<ICmpInst>(I);
3761 // We can fold eq/ne comparisons with null to false/true, respectively.
3762 // We also fold comparisons in some conditions provided the alloc has
3763 // not escaped (see isNeverEqualToUnescapedAlloc).
3764 if (!ICI->isEquality())
3765 return std::nullopt;
3766 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3767 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3768 return std::nullopt;
3769
3770 // Do not fold compares to aligned_alloc calls, as they may have to
3771 // return null in case the required alignment cannot be satisfied,
3772 // unless we can prove that both alignment and size are valid.
3773 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3774 // Check if alignment and size of a call to aligned_alloc is valid,
3775 // that is alignment is a power-of-2 and the size is a multiple of the
3776 // alignment.
3777 const APInt *Alignment;
3778 const APInt *Size;
3779 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3780 match(CB->getArgOperand(1), m_APInt(Size)) &&
3781 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3782 };
3783 auto *CB = dyn_cast<CallBase>(AI);
3784 LibFunc TheLibFunc;
3785 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3786 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3787 !AlignmentAndSizeKnownValid(CB))
3788 return std::nullopt;
3789 Users.emplace_back(I);
3790 continue;
3791 }
3792
3793 case Instruction::Call:
3794 // Ignore no-op and store intrinsics.
3795         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3796 switch (II->getIntrinsicID()) {
3797 default:
3798 return std::nullopt;
3799
3800 case Intrinsic::memmove:
3801 case Intrinsic::memcpy:
3802 case Intrinsic::memset: {
3803             MemIntrinsic *MI = cast<MemIntrinsic>(II);
3804 if (MI->isVolatile())
3805 return std::nullopt;
3806 // Note: this could also be ModRef, but we can still interpret that
3807 // as just Mod in that case.
3808 ModRefInfo NewAccess =
3809 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3810 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3811 return std::nullopt;
3812 Access |= NewAccess;
3813 [[fallthrough]];
3814 }
3815 case Intrinsic::assume:
3816 case Intrinsic::invariant_start:
3817 case Intrinsic::invariant_end:
3818 case Intrinsic::lifetime_start:
3819 case Intrinsic::lifetime_end:
3820 case Intrinsic::objectsize:
3821 Users.emplace_back(I);
3822 continue;
3823 case Intrinsic::launder_invariant_group:
3824 case Intrinsic::strip_invariant_group:
3825 Users.emplace_back(I);
3826 Worklist.push_back(I);
3827 continue;
3828 }
3829 }
3830
3831 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3832 getAllocationFamily(I, &TLI) == Family) {
3833 Users.emplace_back(I);
3834 continue;
3835 }
3836
3837 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3838 getAllocationFamily(I, &TLI) == Family) {
3839 Users.emplace_back(I);
3840 Worklist.push_back(I);
3841 continue;
3842 }
3843
3844 if (!isRefSet(Access) &&
3845 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3846           Access |= ModRefInfo::Mod;
3847 Users.emplace_back(I);
3848 continue;
3849 }
3850
3851 return std::nullopt;
3852
3853 case Instruction::Store: {
3854         StoreInst *SI = cast<StoreInst>(I);
3855 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3856 return std::nullopt;
3857 if (isRefSet(Access))
3858 return std::nullopt;
3859         Access |= ModRefInfo::Mod;
3860 Users.emplace_back(I);
3861 continue;
3862 }
3863
3864 case Instruction::Load: {
3865 LoadInst *LI = cast<LoadInst>(I);
3866 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3867 return std::nullopt;
3868 if (isModSet(Access))
3869 return std::nullopt;
3870         Access |= ModRefInfo::Ref;
3871 Users.emplace_back(I);
3872 continue;
3873 }
3874 }
3875 llvm_unreachable("missing a return?");
3876 }
3877 } while (!Worklist.empty());
3878
3880 return Access;
3881}
3882
3883 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3884   SmallVector<WeakTrackingVH, 64> Users;
3885
3886 // If we have a malloc call which is only used in any amount of comparisons to
3887 // null and free calls, delete the calls and replace the comparisons with true
3888 // or false as appropriate.
3889
3890 // This is based on the principle that we can substitute our own allocation
3891 // function (which will never return null) rather than knowledge of the
3892 // specific function being called. In some sense this can change the permitted
3893 // outputs of a program (when we convert a malloc to an alloca, the fact that
3894 // the allocation is now on the stack is potentially visible, for example),
3895   // but we believe it does so in a permissible manner.
3897
3898 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3899 // before each store.
3900   SmallVector<DbgVariableRecord *, 8> DVRs;
3901 std::unique_ptr<DIBuilder> DIB;
3902 if (isa<AllocaInst>(MI)) {
3903 findDbgUsers(&MI, DVRs);
3904 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3905 }
3906
3907 // Determine what getInitialValueOfAllocation would return without actually
3908 // allocating the result.
3909 bool KnowInitUndef = false;
3910 bool KnowInitZero = false;
3911 Constant *Init =
3912       getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3913 if (Init) {
3914 if (isa<UndefValue>(Init))
3915 KnowInitUndef = true;
3916 else if (Init->isNullValue())
3917 KnowInitZero = true;
3918 }
3919 // The various sanitizers don't actually return undef memory, but rather
3920   // memory initialized with special forms of runtime poison.
3921 auto &F = *MI.getFunction();
3922 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3923 F.hasFnAttribute(Attribute::SanitizeAddress))
3924 KnowInitUndef = false;
3925
3926 auto Removable =
3927 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3928 if (Removable) {
3929 for (WeakTrackingVH &User : Users) {
3930 // Lowering all @llvm.objectsize and MTI calls first because they may use
3931 // a bitcast/GEP of the alloca we are removing.
3932 if (!User)
3933 continue;
3934
3935       Instruction *I = cast<Instruction>(&*User);
3936
3937       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3938 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3939 SmallVector<Instruction *> InsertedInstructions;
3940 Value *Result = lowerObjectSizeCall(
3941 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3942 for (Instruction *Inserted : InsertedInstructions)
3943 Worklist.add(Inserted);
3944 replaceInstUsesWith(*I, Result);
3945           eraseInstFromFunction(*I);
3946 User = nullptr; // Skip examining in the next loop.
3947 continue;
3948 }
3949 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3950 if (KnowInitZero && isRefSet(*Removable)) {
3951           IRBuilderBase::InsertPointGuard Guard(Builder);
3952 Builder.SetInsertPoint(MTI);
3953 auto *M = Builder.CreateMemSet(
3954 MTI->getRawDest(),
3955 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3956 MTI->getLength(), MTI->getDestAlign());
3957 M->copyMetadata(*MTI);
3958 }
3959 }
3960 }
3961 }
3962 for (WeakTrackingVH &User : Users) {
3963 if (!User)
3964 continue;
3965
3966       Instruction *I = cast<Instruction>(&*User);
3967
3968 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3969         replaceInstUsesWith(*C,
3970 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3971 C->isFalseWhenEqual()));
3972 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3973 for (auto *DVR : DVRs)
3974 if (DVR->isAddressOfVariable())
3975             ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3976 } else {
3977 // Casts, GEP, or anything else: we're about to delete this instruction,
3978 // so it can not have any valid uses.
3979         Value *Replace;
3980 if (isa<LoadInst>(I)) {
3981 assert(KnowInitZero || KnowInitUndef);
3982 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3983 : Constant::getNullValue(I->getType());
3984 } else
3985 Replace = PoisonValue::get(I->getType());
3986         replaceInstUsesWith(*I, Replace);
3987 }
3988       eraseInstFromFunction(*I);
3989 }
3990
3991     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3992 // Replace invoke with a NOP intrinsic to maintain the original CFG
3993 Module *M = II->getModule();
3994 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3995 auto *NewII = InvokeInst::Create(
3996 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3997 NewII->setDebugLoc(II->getDebugLoc());
3998 }
3999
4000 // Remove debug intrinsics which describe the value contained within the
4001 // alloca. In addition to removing dbg.{declare,addr} which simply point to
4002 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
4003 //
4004 // ```
4005 // define void @foo(i32 %0) {
4006 // %a = alloca i32 ; Deleted.
4007 // store i32 %0, i32* %a
4008 // dbg.value(i32 %0, "arg0") ; Not deleted.
4009 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
4010 // call void @trivially_inlinable_no_op(i32* %a)
4011 // ret void
4012 // }
4013 // ```
4014 //
4015 // This may not be required if we stop describing the contents of allocas
4016 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
4017 // the LowerDbgDeclare utility.
4018 //
4019 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
4020 // "arg0" dbg.value may be stale after the call. However, failing to remove
4021 // the DW_OP_deref dbg.value causes large gaps in location coverage.
4022 //
4023 // FIXME: the Assignment Tracking project has now likely made this
4024 // redundant (and it's sometimes harmful).
4025 for (auto *DVR : DVRs)
4026 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
4027 DVR->eraseFromParent();
4028
4029 return eraseInstFromFunction(MI);
4030 }
4031 return nullptr;
4032}
4033
4034/// Move the call to free before a NULL test.
4035///
4036 /// Check if this free is executed after its argument has been tested
4037 /// against NULL (property 0).
4038/// If yes, it is legal to move this call in its predecessor block.
4039///
4040/// The move is performed only if the block containing the call to free
4041/// will be removed, i.e.:
4042/// 1. it has only one predecessor P, and P has two successors
4043/// 2. it contains the call, noops, and an unconditional branch
4044/// 3. its successor is the same as its predecessor's successor
4045///
4046 /// Profitability is not a concern here; this function should be called
4047 /// only if the caller knows this transformation would be profitable
4048 /// (e.g., for code size).
4049 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
4050 const DataLayout &DL) {
4051 Value *Op = FI.getArgOperand(0);
4052 BasicBlock *FreeInstrBB = FI.getParent();
4053 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
4054
4055 // Validate part of constraint #1: Only one predecessor
4056   // FIXME: We can extend the number of predecessors, but in that case, we
4057 // would duplicate the call to free in each predecessor and it may
4058 // not be profitable even for code size.
4059 if (!PredBB)
4060 return nullptr;
4061
4062   // Validate constraint #2: Does this block contain only the call to
4063   // free, noops, and an unconditional branch?
4064 BasicBlock *SuccBB;
4065 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
4066 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
4067 return nullptr;
4068
4069   // If there are only 2 instructions in the block, at this point,
4070   // these are the call to free and the unconditional branch.
4071 // If there are more than 2 instructions, check that they are noops
4072 // i.e., they won't hurt the performance of the generated code.
4073 if (FreeInstrBB->size() != 2) {
4074 for (const Instruction &Inst : *FreeInstrBB) {
4075 if (&Inst == &FI || &Inst == FreeInstrBBTerminator ||
4076           Inst.isDebugOrPseudoInst())
4077 continue;
4078 auto *Cast = dyn_cast<CastInst>(&Inst);
4079 if (!Cast || !Cast->isNoopCast(DL))
4080 return nullptr;
4081 }
4082 }
4083 // Validate the rest of constraint #1 by matching on the pred branch.
4084 Instruction *TI = PredBB->getTerminator();
4085 BasicBlock *TrueBB, *FalseBB;
4086 CmpPredicate Pred;
4087 if (!match(TI, m_Br(m_ICmp(Pred,
4088                              m_CombineOr(m_Specific(Op),
4089 m_Specific(Op->stripPointerCasts())),
4090 m_Zero()),
4091 TrueBB, FalseBB)))
4092 return nullptr;
4093 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
4094 return nullptr;
4095
4096 // Validate constraint #3: Ensure the null case just falls through.
4097 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
4098 return nullptr;
4099 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
4100 "Broken CFG: missing edge from predecessor to successor");
4101
4102 // At this point, we know that everything in FreeInstrBB can be moved
4103 // before TI.
4104 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
4105 if (&Instr == FreeInstrBBTerminator)
4106 break;
4107 Instr.moveBeforePreserving(TI->getIterator());
4108 }
4109 assert(FreeInstrBB->size() == 1 &&
4110 "Only the branch instruction should remain");
4111
4112 // Now that we've moved the call to free before the NULL check, we have to
4113 // remove any attributes on its parameter that imply it's non-null, because
4114 // those attributes might have only been valid because of the NULL check, and
4115 // we can get miscompiles if we keep them. This is conservative if non-null is
4116 // also implied by something other than the NULL check, but it's guaranteed to
4117 // be correct, and the conservativeness won't matter in practice, since the
4118 // attributes are irrelevant for the call to free itself and the pointer
4119 // shouldn't be used after the call.
4120 AttributeList Attrs = FI.getAttributes();
4121 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
4122 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4123 if (Dereferenceable.isValid()) {
4124 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
4125 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
4126 Attribute::Dereferenceable);
4127 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
4128 }
4129 FI.setAttributes(Attrs);
4130
4131 return &FI;
4132}
4133
4134 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
4135 // free undef -> unreachable.
4136 if (isa<UndefValue>(Op)) {
4137 // Leave a marker since we can't modify the CFG here.
4138     CreateNonTerminatorUnreachable(&FI);
4139 return eraseInstFromFunction(FI);
4140 }
4141
4142 // If we have 'free null' delete the instruction. This can happen in stl code
4143 // when lots of inlining happens.
4144   if (isa<ConstantPointerNull>(Op))
4145 return eraseInstFromFunction(FI);
4146
4147 // If we had free(realloc(...)) with no intervening uses, then eliminate the
4148 // realloc() entirely.
4149   CallInst *CI = dyn_cast<CallInst>(Op);
4150 if (CI && CI->hasOneUse())
4151 if (Value *ReallocatedOp = getReallocatedOperand(CI))
4152 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
4153
4154   // If we optimize for code size, try to move the call to free before the null
4155   // test so that simplifycfg can remove the empty block and dead code
4156   // elimination can remove the branch. I.e., this helps to turn something like:
4157 // if (foo) free(foo);
4158 // into
4159 // free(foo);
4160 //
4161 // Note that we can only do this for 'free' and not for any flavor of
4162 // 'operator delete'; there is no 'operator delete' symbol for which we are
4163 // permitted to invent a call, even if we're passing in a null pointer.
4164 if (MinimizeSize) {
4165 LibFunc Func;
4166 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
4167       if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
4168 return I;
4169 }
4170
4171 return nullptr;
4172}
4173
4174 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
4175 Value *RetVal = RI.getReturnValue();
4176 if (!RetVal)
4177 return nullptr;
4178
4179 Function *F = RI.getFunction();
4180 Type *RetTy = RetVal->getType();
4181 if (RetTy->isPointerTy()) {
4182 bool HasDereferenceable =
4183 F->getAttributes().getRetDereferenceableBytes() > 0;
4184 if (F->hasRetAttribute(Attribute::NonNull) ||
4185 (HasDereferenceable &&
4186          !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
4187 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4188 return replaceOperand(RI, 0, V);
4189 }
4190 }
4191
4192 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4193 return nullptr;
4194
4195 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4196 if (ReturnClass == fcNone)
4197 return nullptr;
4198
4199 KnownFPClass KnownClass;
4200 if (SimplifyDemandedFPClass(&RI, 0, ~ReturnClass, KnownClass,
4201 SQ.getWithInstruction(&RI)))
4202 return &RI;
4203
4204 return nullptr;
4205}
4206
4207// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
4208 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
4209 // Try to remove the previous instruction if it must lead to unreachable.
4210 // This includes instructions like stores and "llvm.assume" that may not get
4211 // removed by simple dead code elimination.
4212 bool Changed = false;
4213 while (Instruction *Prev = I.getPrevNode()) {
4214 // While we theoretically can erase EH, that would result in a block that
4215 // used to start with an EH no longer starting with EH, which is invalid.
4216 // To make it valid, we'd need to fixup predecessors to no longer refer to
4217 // this block, but that changes CFG, which is not allowed in InstCombine.
4218 if (Prev->isEHPad())
4219 break; // Can not drop any more instructions. We're done here.
4220
4221     if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
4222 break; // Can not drop any more instructions. We're done here.
4223 // Otherwise, this instruction can be freely erased,
4224 // even if it is not side-effect free.
4225
4226 // A value may still have uses before we process it here (for example, in
4227 // another unreachable block), so convert those to poison.
4228 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4229 eraseInstFromFunction(*Prev);
4230 Changed = true;
4231 }
4232 return Changed;
4233}
4234
4235 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
4236   if (removeInstructionsBeforeUnreachable(I))
4237     return &I;
4238   return nullptr;
4239 }
4240 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4241 // If this store is the second-to-last instruction in the basic block
4242 // (excluding debug info) and if the block ends with
4243 // an unconditional branch, try to move the store to the successor block.
4244
4245 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4246 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4247 do {
4248 if (BBI != FirstInstr)
4249 --BBI;
4250 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4251
4252 return dyn_cast<StoreInst>(BBI);
4253 };
4254
4255 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4256     if (mergeStoreIntoSuccessor(*SI))
4257 return &BI;
4258
4259 return nullptr;
4260}
4261
4262 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl<BasicBlock *> &Worklist) {
4264 if (!DeadEdges.insert({From, To}).second)
4265 return;
4266
4267 // Replace phi node operands in successor with poison.
4268 for (PHINode &PN : To->phis())
4269 for (Use &U : PN.incoming_values())
4270 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4271 replaceUse(U, PoisonValue::get(PN.getType()));
4272 addToWorklist(&PN);
4273 MadeIRChange = true;
4274 }
4275
4276 Worklist.push_back(To);
4277}
4278
4279// Under the assumption that I is unreachable, remove it and following
4280// instructions. Changes are reported directly to MadeIRChange.
4281 void InstCombinerImpl::handleUnreachableFrom(
4282     Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
4283 BasicBlock *BB = I->getParent();
4284 for (Instruction &Inst : make_early_inc_range(
4285 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4286 std::next(I->getReverseIterator())))) {
4287 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4288 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4289 MadeIRChange = true;
4290 }
4291 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4292 continue;
4293 // RemoveDIs: erase debug-info on this instruction manually.
4294 Inst.dropDbgRecords();
4295     Inst.eraseFromParent();
4296 MadeIRChange = true;
4297 }
4298
4299   SmallVector<Value *> Changed;
4300   if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
4301 MadeIRChange = true;
4302 for (Value *V : Changed)
4303       addToWorklist(cast<Instruction>(V));
4304 }
4305
4306 // Handle potentially dead successors.
4307 for (BasicBlock *Succ : successors(BB))
4308 addDeadEdge(BB, Succ, Worklist);
4309}
4310
4312 void InstCombinerImpl::handlePotentiallyDeadBlocks(SmallVectorImpl<BasicBlock *> &Worklist) {
4313 while (!Worklist.empty()) {
4314 BasicBlock *BB = Worklist.pop_back_val();
4315 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4316 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4317 }))
4318 continue;
4319
4320     handleUnreachableFrom(&BB->front(), Worklist);
4321 }
4322}
4323
4324 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4325 BasicBlock *LiveSucc) {
4326   SmallVector<BasicBlock *> Worklist;
4327 for (BasicBlock *Succ : successors(BB)) {
4328 // The live successor isn't dead.
4329 if (Succ == LiveSucc)
4330 continue;
4331
4332 addDeadEdge(BB, Succ, Worklist);
4333 }
4334
4335   handlePotentiallyDeadBlocks(Worklist);
4336}
4337
4338 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4339 // Change br (not X), label True, label False to: br X, label False, True
4340 Value *Cond = BI.getCondition();
4341 Value *X;
4342 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4343 // Swap Destinations and condition...
4344 BI.swapSuccessors();
4345 if (BPI)
4346 BPI->swapSuccEdgesProbabilities(BI.getParent());
4347 return replaceOperand(BI, 0, X);
4348 }
4349
4350 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4351 // This is done by inverting the condition and swapping successors:
4352 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4353 Value *Y;
4354 if (isa<SelectInst>(Cond) &&
4355 match(Cond,
4356             m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
4357 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4358 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4359
4360 // Set weights for the new OR select instruction too.
4361     if (!ProfcheckDisableMetadataFixes) {
4362 if (auto *OrInst = dyn_cast<Instruction>(Or)) {
4363 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
4364 SmallVector<uint32_t> Weights;
4365 if (extractBranchWeights(*CondInst, Weights)) {
4366 assert(Weights.size() == 2 &&
4367 "Unexpected number of branch weights!");
4368 std::swap(Weights[0], Weights[1]);
4369 setBranchWeights(*OrInst, Weights, /*IsExpected=*/false);
4370 }
4371 }
4372 }
4373 }
4374 BI.swapSuccessors();
4375 if (BPI)
4376 BPI->swapSuccEdgesProbabilities(BI.getParent());
4377 return replaceOperand(BI, 0, Or);
4378 }
4379
4380 // If the condition is irrelevant, remove the use so that other
4381 // transforms on the condition become more effective.
4382 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4383 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4384
4385 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4386 CmpPredicate Pred;
4387 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4388 !isCanonicalPredicate(Pred)) {
4389 // Swap destinations and condition.
4390 auto *Cmp = cast<CmpInst>(Cond);
4391 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4392 BI.swapSuccessors();
4393 if (BPI)
4394 BPI->swapSuccEdgesProbabilities(BI.getParent());
4395 Worklist.push(Cmp);
4396 return &BI;
4397 }
4398
4399 if (isa<UndefValue>(Cond)) {
4400 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4401 return nullptr;
4402 }
4403 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4404 handlePotentiallyDeadSuccessors(BI.getParent(),
4405 BI.getSuccessor(!CI->getZExtValue()));
4406 return nullptr;
4407 }
4408
4409 // Replace all dominated uses of the condition with true/false
4410 // Ignore constant expressions to avoid iterating over uses on other
4411 // functions.
4412 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4413 for (auto &U : make_early_inc_range(Cond->uses())) {
4414 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4415 if (DT.dominates(Edge0, U)) {
4416 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4417 addToWorklist(cast<Instruction>(U.getUser()));
4418 continue;
4419 }
4420 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4421 if (DT.dominates(Edge1, U)) {
4422 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4423 addToWorklist(cast<Instruction>(U.getUser()));
4424 }
4425 }
4426 }
4427
4428 DC.registerBranch(&BI);
4429 return nullptr;
4430}
4431
4432// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4433// we can prove that both (switch C) and (switch X) go to the default when cond
4434// is false/true.
4435 static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
4436                                                 SelectInst *Select,
4437 bool IsTrueArm) {
4438 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4439 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4440 if (!C)
4441 return nullptr;
4442
4443 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4444 if (CstBB != SI.getDefaultDest())
4445 return nullptr;
4446 Value *X = Select->getOperand(3 - CstOpIdx);
4447 CmpPredicate Pred;
4448 const APInt *RHSC;
4449 if (!match(Select->getCondition(),
4450 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4451 return nullptr;
4452 if (IsTrueArm)
4453 Pred = ICmpInst::getInversePredicate(Pred);
4454
4455 // See whether we can replace the select with X
4456   ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
4457 for (auto Case : SI.cases())
4458 if (!CR.contains(Case.getCaseValue()->getValue()))
4459 return nullptr;
4460
4461 return X;
4462}
4463
4464 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4465 Value *Cond = SI.getCondition();
4466 Value *Op0;
4467 const APInt *CondOpC;
4468 using InvertFn = std::function<APInt(const APInt &Case, const APInt &C)>;
4469
4470 auto MaybeInvertible = [&](Value *Cond) -> InvertFn {
4471 if (match(Cond, m_Add(m_Value(Op0), m_APInt(CondOpC))))
4472 // Change 'switch (X+C) case Case:' into 'switch (X) case Case-C'.
4473 return [](const APInt &Case, const APInt &C) { return Case - C; };
4474
4475 if (match(Cond, m_Sub(m_APInt(CondOpC), m_Value(Op0))))
4476 // Change 'switch (C-X) case Case:' into 'switch (X) case C-Case'.
4477 return [](const APInt &Case, const APInt &C) { return C - Case; };
4478
4479 if (match(Cond, m_Xor(m_Value(Op0), m_APInt(CondOpC))) &&
4480 !CondOpC->isMinSignedValue() && !CondOpC->isMaxSignedValue())
4481 // Change 'switch (X^C) case Case:' into 'switch (X) case Case^C'.
4482 // Prevent creation of large case values by excluding extremes.
4483 return [](const APInt &Case, const APInt &C) { return Case ^ C; };
4484
4485 return nullptr;
4486 };
4487
4488 // Attempt to invert and simplify the switch condition, as long as the
4489 // condition is not used further, as it may not be profitable otherwise.
4490 if (auto InvertFn = MaybeInvertible(Cond); InvertFn && Cond->hasOneUse()) {
4491 for (auto &Case : SI.cases()) {
4492 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4493 Case.setValue(ConstantInt::get(SI.getContext(), New));
4494 }
4495 return replaceOperand(SI, 0, Op0);
4496 }
4497
4498 uint64_t ShiftAmt;
4499 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4500 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4501 all_of(SI.cases(), [&](const auto &Case) {
4502 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4503 })) {
4504 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4505     auto *Shl = cast<OverflowingBinaryOperator>(Cond);
4506 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4507 Shl->hasOneUse()) {
4508 Value *NewCond = Op0;
4509 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4510 // If the shift may wrap, we need to mask off the shifted bits.
4511 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4512 NewCond = Builder.CreateAnd(
4513 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4514 }
4515 for (auto Case : SI.cases()) {
4516 const APInt &CaseVal = Case.getCaseValue()->getValue();
4517 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4518 : CaseVal.lshr(ShiftAmt);
4519 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4520 }
4521 return replaceOperand(SI, 0, NewCond);
4522 }
4523 }
4524
4525 // Fold switch(zext/sext(X)) into switch(X) if possible.
4526 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4527 bool IsZExt = isa<ZExtInst>(Cond);
4528 Type *SrcTy = Op0->getType();
4529 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4530
4531 if (all_of(SI.cases(), [&](const auto &Case) {
4532 const APInt &CaseVal = Case.getCaseValue()->getValue();
4533 return IsZExt ? CaseVal.isIntN(NewWidth)
4534 : CaseVal.isSignedIntN(NewWidth);
4535 })) {
4536 for (auto &Case : SI.cases()) {
4537 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4538 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4539 }
4540 return replaceOperand(SI, 0, Op0);
4541 }
4542 }
4543
4544 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4545 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4546 if (Value *V =
4547 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4548 return replaceOperand(SI, 0, V);
4549 if (Value *V =
4550 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4551 return replaceOperand(SI, 0, V);
4552 }
4553
4554 KnownBits Known = computeKnownBits(Cond, &SI);
4555 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4556 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4557
4558 // Compute the number of leading bits we can ignore.
4559 // TODO: A better way to determine this would use ComputeNumSignBits().
4560 for (const auto &C : SI.cases()) {
4561 LeadingKnownZeros =
4562 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4563 LeadingKnownOnes =
4564 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4565 }
4566
4567 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4568
4569 // Shrink the condition operand if the new type is smaller than the old type.
4570 // But do not shrink to a non-standard type, because backend can't generate
4571 // good code for that yet.
4572 // TODO: We can make it aggressive again after fixing PR39569.
4573 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4574 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4575 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4576 Builder.SetInsertPoint(&SI);
4577 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4578
4579 for (auto Case : SI.cases()) {
4580 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4581 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4582 }
4583 return replaceOperand(SI, 0, NewCond);
4584 }
4585
4586 if (isa<UndefValue>(Cond)) {
4587 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4588 return nullptr;
4589 }
4590 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4591     handlePotentiallyDeadSuccessors(SI.getParent(),
4592 SI.findCaseValue(CI)->getCaseSuccessor());
4593 return nullptr;
4594 }
4595
4596 return nullptr;
4597}
4598
4599 Instruction *
4600InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4601   auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4602 if (!WO)
4603 return nullptr;
4604
4605 Intrinsic::ID OvID = WO->getIntrinsicID();
4606 const APInt *C = nullptr;
4607 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4608 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4609 OvID == Intrinsic::umul_with_overflow)) {
4610 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4611 if (C->isAllOnes())
4612 return BinaryOperator::CreateNeg(WO->getLHS());
4613 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4614 if (C->isPowerOf2()) {
4615 return BinaryOperator::CreateShl(
4616 WO->getLHS(),
4617 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4618 }
4619 }
4620 }
4621
4622 // We're extracting from an overflow intrinsic. See if we're the only user.
4623 // That allows us to simplify multiple result intrinsics to simpler things
4624 // that just get one value.
4625 if (!WO->hasOneUse())
4626 return nullptr;
4627
4628 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4629 // and replace it with a traditional binary instruction.
4630 if (*EV.idx_begin() == 0) {
4631 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4632 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4633 // Replace the old instruction's uses with poison.
4634 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4636 return BinaryOperator::Create(BinOp, LHS, RHS);
4637 }
4638
4639 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4640
4641 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4642 if (OvID == Intrinsic::usub_with_overflow)
4643 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4644
4645 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4646 // +1 is not possible because we assume signed values.
4647 if (OvID == Intrinsic::smul_with_overflow &&
4648 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4649 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4650
4651 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4652 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4653 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4654 // Only handle even bitwidths for performance reasons.
4655 if (BitWidth % 2 == 0)
4656 return new ICmpInst(
4657 ICmpInst::ICMP_UGT, WO->getLHS(),
4658 ConstantInt::get(WO->getLHS()->getType(),
4659                            APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4660 }
4661
4662 // If only the overflow result is used, and the right hand side is a
4663 // constant (or constant splat), we can remove the intrinsic by directly
4664 // checking for overflow.
4665 if (C) {
4666 // Compute the no-wrap range for LHS given RHS=C, then construct an
4667 // equivalent icmp, potentially using an offset.
4668 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4669 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4670
4671 CmpInst::Predicate Pred;
4672 APInt NewRHSC, Offset;
4673 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4674 auto *OpTy = WO->getRHS()->getType();
4675 auto *NewLHS = WO->getLHS();
4676 if (Offset != 0)
4677 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4678 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4679 ConstantInt::get(OpTy, NewRHSC));
4680 }
4681
4682 return nullptr;
4683}
4684
4685 static Value *foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall,
4686                                 SelectInst *SelectInst,
4687 InstCombiner::BuilderTy &Builder) {
4688 // Helper to fold frexp of select to select of frexp.
4689
4690 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4691 return nullptr;
4692   Value *Cond = SelectInst->getCondition();
4693 Value *TrueVal = SelectInst->getTrueValue();
4694 Value *FalseVal = SelectInst->getFalseValue();
4695
4696 const APFloat *ConstVal = nullptr;
4697 Value *VarOp = nullptr;
4698 bool ConstIsTrue = false;
4699
4700 if (match(TrueVal, m_APFloat(ConstVal))) {
4701 VarOp = FalseVal;
4702 ConstIsTrue = true;
4703 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4704 VarOp = TrueVal;
4705 ConstIsTrue = false;
4706 } else {
4707 return nullptr;
4708 }
4709
4710 Builder.SetInsertPoint(&EV);
4711
4712 CallInst *NewFrexp =
4713 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4714 NewFrexp->copyIRFlags(FrexpCall);
4715
4716 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4717
4718 int Exp;
4719 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4720
4721 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4722
4723 Value *NewSel = Builder.CreateSelectFMF(
4724 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4725 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4726 return NewSel;
4727}
4728 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4729 Value *Agg = EV.getAggregateOperand();
4730
4731 if (!EV.hasIndices())
4732 return replaceInstUsesWith(EV, Agg);
4733
4734 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4735 SQ.getWithInstruction(&EV)))
4736 return replaceInstUsesWith(EV, V);
4737
4738 Value *Cond, *TrueVal, *FalseVal;
4739   if (match(Agg, m_OneUse(m_Intrinsic<Intrinsic::frexp>(m_OneUse(m_Select(
4740 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4741 auto *SelInst =
4742 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4743 if (Value *Result =
4744 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4745 return replaceInstUsesWith(EV, Result);
4746 }
4747   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4748 // We're extracting from an insertvalue instruction, compare the indices
4749 const unsigned *exti, *exte, *insi, *inse;
4750 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4751 exte = EV.idx_end(), inse = IV->idx_end();
4752 exti != exte && insi != inse;
4753 ++exti, ++insi) {
4754 if (*insi != *exti)
4755 // The insert and extract both reference distinctly different elements.
4756 // This means the extract is not influenced by the insert, and we can
4757 // replace the aggregate operand of the extract with the aggregate
4758 // operand of the insert. i.e., replace
4759 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4760 // %E = extractvalue { i32, { i32 } } %I, 0
4761 // with
4762 // %E = extractvalue { i32, { i32 } } %A, 0
4763 return ExtractValueInst::Create(IV->getAggregateOperand(),
4764 EV.getIndices());
4765 }
4766 if (exti == exte && insi == inse)
4767 // Both iterators are at the end: Index lists are identical. Replace
4768 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4769 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4770 // with "i32 42"
4771 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4772 if (exti == exte) {
4773 // The extract list is a prefix of the insert list. i.e. replace
4774 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4775 // %E = extractvalue { i32, { i32 } } %I, 1
4776 // with
4777 // %X = extractvalue { i32, { i32 } } %A, 1
4778 // %E = insertvalue { i32 } %X, i32 42, 0
4779 // by switching the order of the insert and extract (though the
4780 // insertvalue should be left in, since it may have other uses).
4781 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4782 EV.getIndices());
4783 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4784 ArrayRef(insi, inse));
4785 }
4786 if (insi == inse)
4787 // The insert list is a prefix of the extract list
4788 // We can simply remove the common indices from the extract and make it
4789 // operate on the inserted value instead of the insertvalue result.
4790 // i.e., replace
4791 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4792 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4793 // with
4794 // %E extractvalue { i32 } { i32 42 }, 0
4795 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4796 ArrayRef(exti, exte));
4797 }
4798
4799 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4800 return R;
4801
4802 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4803 // Bail out if the aggregate contains scalable vector type
4804 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4805 STy && STy->isScalableTy())
4806 return nullptr;
4807
4808 // If the (non-volatile) load only has one use, we can rewrite this to a
4809 // load from a GEP. This reduces the size of the load. If a load is used
4810 // only by extractvalue instructions then this either must have been
4811 // optimized before, or it is a struct with padding, in which case we
4812 // don't want to do the transformation as it loses padding knowledge.
4813 if (L->isSimple() && L->hasOneUse()) {
4814 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4815 SmallVector<Value*, 4> Indices;
4816 // Prefix an i32 0 since we need the first element.
4817 Indices.push_back(Builder.getInt32(0));
4818 for (unsigned Idx : EV.indices())
4819 Indices.push_back(Builder.getInt32(Idx));
4820
4821 // We need to insert these at the location of the old load, not at that of
4822 // the extractvalue.
4823 Builder.SetInsertPoint(L);
4824 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4825 L->getPointerOperand(), Indices);
4826 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4827 // Whatever aliasing information we had for the original load must also
4828 // hold for the smaller load, so propagate the annotations.
4829 NL->setAAMetadata(L->getAAMetadata());
4830 // Returning the load directly will cause the main loop to insert it in
4831 // the wrong spot, so use replaceInstUsesWith().
4832 return replaceInstUsesWith(EV, NL);
4833 }
4834 }
4835
4836 if (auto *PN = dyn_cast<PHINode>(Agg))
4837 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4838 return Res;
4839
4840 // Canonicalize extract (select Cond, TV, FV)
4841 // -> select cond, (extract TV), (extract FV)
4842 if (auto *SI = dyn_cast<SelectInst>(Agg))
4843 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4844 return R;
4845
4846 // We could simplify extracts from other values. Note that nested extracts may
4847 // already be simplified implicitly by the above: extract (extract (insert) )
4848 // will be translated into extract ( insert ( extract ) ) first and then just
4849 // the value inserted, if appropriate. Similarly for extracts from single-use
4850 // loads: extract (extract (load)) will be translated to extract (load (gep))
4851 // and if again single-use then via load (gep (gep)) to load (gep).
4852 // However, double extracts from e.g. function arguments or return values
4853 // aren't handled yet.
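// An illustrative sketch of the chained case (hypothetical IR):
//   %a = load { { i32 } }, ptr %p            ; single use
//   %i = extractvalue { { i32 } } %a, 0
//   %e = extractvalue { i32 } %i, 0
// First %i becomes a load of a gep; if that new load is again single-use,
// %e in turn becomes a load of a further gep, and the geps fold together.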
4854 return nullptr;
4855}
4856
4857/// Return 'true' if the given typeinfo will match anything.
4858static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4859 switch (Personality) {
4860 case EHPersonality::GNU_C:
4861 case EHPersonality::GNU_C_SjLj:
4862 case EHPersonality::Rust:
4863 // The GCC C EH and Rust personalities only exist to support cleanups, so
4864 // it's not clear what the semantics of catch clauses are.
4865 return false;
4866 case EHPersonality::Unknown:
4867 return false;
4868 case EHPersonality::GNU_Ada:
4869 // While __gnat_all_others_value will match any Ada exception, it doesn't
4870 // match foreign exceptions (or didn't, before gcc-4.7).
4871 return false;
4872 case EHPersonality::GNU_CXX:
4873 case EHPersonality::GNU_CXX_SjLj:
4874 case EHPersonality::GNU_ObjC:
4875 case EHPersonality::MSVC_X86SEH:
4876 case EHPersonality::MSVC_TableSEH:
4877 case EHPersonality::MSVC_CXX:
4878 case EHPersonality::CoreCLR:
4879 case EHPersonality::Wasm_CXX:
4880 case EHPersonality::XL_CXX:
4881 case EHPersonality::ZOS_CXX:
4882 return TypeInfo->isNullValue();
4883 }
4884 llvm_unreachable("invalid enum");
4885}
4886
4887static bool shorter_filter(const Value *LHS, const Value *RHS) {
4888 return
4889 cast<ArrayType>(LHS->getType())->getNumElements()
4890 <
4891 cast<ArrayType>(RHS->getType())->getNumElements();
4892}
4893
4894 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4895 // The logic here should be correct for any real-world personality function.
4896 // However if that turns out not to be true, the offending logic can always
4897 // be conditioned on the personality function, like the catch-all logic is.
4898 EHPersonality Personality =
4899 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4900
4901 // Simplify the list of clauses, e.g. by removing repeated catch clauses
4902 // (these are often created by inlining).
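// As an illustration (hypothetical IR): after inlining, a landingpad such as
//   %lp = landingpad { ptr, i32 } catch ptr @ti catch ptr @ti
// carries a redundant second catch clause and is recreated as
//   %lp = landingpad { ptr, i32 } catch ptr @ti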
4903 bool MakeNewInstruction = false; // If true, recreate using the following:
4904 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4905 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4906
4907 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4908 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4909 bool isLastClause = i + 1 == e;
4910 if (LI.isCatch(i)) {
4911 // A catch clause.
4912 Constant *CatchClause = LI.getClause(i);
4913 Constant *TypeInfo = CatchClause->stripPointerCasts();
4914
4915 // If we already saw this clause, there is no point in having a second
4916 // copy of it.
4917 if (AlreadyCaught.insert(TypeInfo).second) {
4918 // This catch clause was not already seen.
4919 NewClauses.push_back(CatchClause);
4920 } else {
4921 // Repeated catch clause - drop the redundant copy.
4922 MakeNewInstruction = true;
4923 }
4924
4925 // If this is a catch-all then there is no point in keeping any following
4926 // clauses or marking the landingpad as having a cleanup.
4927 if (isCatchAll(Personality, TypeInfo)) {
4928 if (!isLastClause)
4929 MakeNewInstruction = true;
4930 CleanupFlag = false;
4931 break;
4932 }
4933 } else {
4934 // A filter clause. If any of the filter elements were already caught
4935 // then they can be dropped from the filter. It is tempting to try to
4936 // exploit the filter further by saying that any typeinfo that does not
4937 // occur in the filter can't be caught later (and thus can be dropped).
4938 // However this would be wrong, since typeinfos can match without being
4939 // equal (for example if one represents a C++ class, and the other some
4940 // class derived from it).
4941 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4942 Constant *FilterClause = LI.getClause(i);
4943 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4944 unsigned NumTypeInfos = FilterType->getNumElements();
4945
4946 // An empty filter catches everything, so there is no point in keeping any
4947 // following clauses or marking the landingpad as having a cleanup. By
4948 // dealing with this case here the following code is made a bit simpler.
4949 if (!NumTypeInfos) {
4950 NewClauses.push_back(FilterClause);
4951 if (!isLastClause)
4952 MakeNewInstruction = true;
4953 CleanupFlag = false;
4954 break;
4955 }
4956
4957 bool MakeNewFilter = false; // If true, make a new filter.
4958 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4959 if (isa<ConstantAggregateZero>(FilterClause)) {
4960 // Not an empty filter - it contains at least one null typeinfo.
4961 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4962 Constant *TypeInfo =
4963 Constant::getNullValue(FilterType->getElementType());
4964 // If this typeinfo is a catch-all then the filter can never match.
4965 if (isCatchAll(Personality, TypeInfo)) {
4966 // Throw the filter away.
4967 MakeNewInstruction = true;
4968 continue;
4969 }
4970
4971 // There is no point in having multiple copies of this typeinfo, so
4972 // discard all but the first copy if there is more than one.
4973 NewFilterElts.push_back(TypeInfo);
4974 if (NumTypeInfos > 1)
4975 MakeNewFilter = true;
4976 } else {
4977 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4978 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4979 NewFilterElts.reserve(NumTypeInfos);
4980
4981 // Remove any filter elements that were already caught or that already
4982 // occurred in the filter. While there, see if any of the elements are
4983 // catch-alls. If so, the filter can be discarded.
4984 bool SawCatchAll = false;
4985 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4986 Constant *Elt = Filter->getOperand(j);
4987 Constant *TypeInfo = Elt->stripPointerCasts();
4988 if (isCatchAll(Personality, TypeInfo)) {
4989 // This element is a catch-all. Bail out, noting this fact.
4990 SawCatchAll = true;
4991 break;
4992 }
4993
4994 // Even if we've seen a type in a catch clause, we don't want to
4995 // remove it from the filter. An unexpected type handler may be
4996 // set up for a call site which throws an exception of the same
4997 // type caught. In order for the exception thrown by the unexpected
4998 // handler to propagate correctly, the filter must be correctly
4999 // described for the call site.
5000 //
5001 // Example:
5002 //
5003 // void unexpected() { throw 1;}
5004 // void foo() throw (int) {
5005 // std::set_unexpected(unexpected);
5006 // try {
5007 // throw 2.0;
5008 // } catch (int i) {}
5009 // }
5010
5011 // There is no point in having multiple copies of the same typeinfo in
5012 // a filter, so only add it if we didn't already.
5013 if (SeenInFilter.insert(TypeInfo).second)
5014 NewFilterElts.push_back(cast<Constant>(Elt));
5015 }
5016 // A filter containing a catch-all cannot match anything by definition.
5017 if (SawCatchAll) {
5018 // Throw the filter away.
5019 MakeNewInstruction = true;
5020 continue;
5021 }
5022
5023 // If we dropped something from the filter, make a new one.
5024 if (NewFilterElts.size() < NumTypeInfos)
5025 MakeNewFilter = true;
5026 }
5027 if (MakeNewFilter) {
5028 FilterType = ArrayType::get(FilterType->getElementType(),
5029 NewFilterElts.size());
5030 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
5031 MakeNewInstruction = true;
5032 }
5033
5034 NewClauses.push_back(FilterClause);
5035
5036 // If the new filter is empty then it will catch everything so there is
5037 // no point in keeping any following clauses or marking the landingpad
5038 // as having a cleanup. The case of the original filter being empty was
5039 // already handled above.
5040 if (MakeNewFilter && !NewFilterElts.size()) {
5041 assert(MakeNewInstruction && "New filter but not a new instruction!");
5042 CleanupFlag = false;
5043 break;
5044 }
5045 }
5046 }
5047
5048 // If several filters occur in a row then reorder them so that the shortest
5049 // filters come first (those with the smallest number of elements). This is
5050 // advantageous because shorter filters are more likely to match, speeding up
5051 // unwinding, but mostly because it increases the effectiveness of the other
5052 // filter optimizations below.
5053 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
5054 unsigned j;
5055 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
5056 for (j = i; j != e; ++j)
5057 if (!isa<ArrayType>(NewClauses[j]->getType()))
5058 break;
5059
5060 // Check whether the filters are already sorted by length. We need to know
5061 // if sorting them is actually going to do anything so that we only make a
5062 // new landingpad instruction if it does.
5063 for (unsigned k = i; k + 1 < j; ++k)
5064 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
5065 // Not sorted, so sort the filters now. Doing an unstable sort would be
5066 // correct too but reordering filters pointlessly might confuse users.
5067 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
5068 shorter_filter);
5069 MakeNewInstruction = true;
5070 break;
5071 }
5072
5073 // Look for the next batch of filters.
5074 i = j + 1;
5075 }
5076
5077 // If typeinfos matched if and only if equal, then the elements of a filter L
5078 // that occurs later than a filter F could be replaced by the intersection of
5079 // the elements of F and L. In reality two typeinfos can match without being
5080 // equal (for example if one represents a C++ class, and the other some class
5081 // derived from it) so it would be wrong to perform this transform in general.
5082 // However the transform is correct and useful if F is a subset of L. In that
5083 // case L can be replaced by F, and thus removed altogether since repeating a
5084 // filter is pointless. So here we look at all pairs of filters F and L where
5085 // L follows F in the list of clauses, and remove L if every element of F is
5086 // an element of L. This can occur when inlining C++ functions with exception
5087 // specifications.
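// As an illustration (hypothetical typeinfos): if the clause
//   filter [1 x ptr] [ptr @ti1]
// is followed by
//   filter [2 x ptr] [ptr @ti1, ptr @ti2]
// then every element of the first filter also occurs in the second, so the
// longer filter is redundant and is deleted.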
5088 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
5089 // Examine each filter in turn.
5090 Value *Filter = NewClauses[i];
5091 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
5092 if (!FTy)
5093 // Not a filter - skip it.
5094 continue;
5095 unsigned FElts = FTy->getNumElements();
5096 // Examine each filter following this one. Doing this backwards means that
5097 // we don't have to worry about filters disappearing under us when removed.
5098 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
5099 Value *LFilter = NewClauses[j];
5100 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
5101 if (!LTy)
5102 // Not a filter - skip it.
5103 continue;
5104 // If Filter is a subset of LFilter, i.e. every element of Filter is also
5105 // an element of LFilter, then discard LFilter.
5106 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
5107 // If Filter is empty then it is a subset of LFilter.
5108 if (!FElts) {
5109 // Discard LFilter.
5110 NewClauses.erase(J);
5111 MakeNewInstruction = true;
5112 // Move on to the next filter.
5113 continue;
5114 }
5115 unsigned LElts = LTy->getNumElements();
5116 // If Filter is longer than LFilter then it cannot be a subset of it.
5117 if (FElts > LElts)
5118 // Move on to the next filter.
5119 continue;
5120 // At this point we know that LFilter has at least one element.
5121 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
5122 // Filter is a subset of LFilter iff Filter contains only zeros (as we
5123 // already know that Filter is not longer than LFilter).
5124 if (isa<ConstantAggregateZero>(Filter)) {
5125 assert(FElts <= LElts && "Should have handled this case earlier!");
5126 // Discard LFilter.
5127 NewClauses.erase(J);
5128 MakeNewInstruction = true;
5129 }
5130 // Move on to the next filter.
5131 continue;
5132 }
5133 ConstantArray *LArray = cast<ConstantArray>(LFilter);
5134 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
5135 // Since Filter is non-empty and contains only zeros, it is a subset of
5136 // LFilter iff LFilter contains a zero.
5137 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
5138 for (unsigned l = 0; l != LElts; ++l)
5139 if (LArray->getOperand(l)->isNullValue()) {
5140 // LFilter contains a zero - discard it.
5141 NewClauses.erase(J);
5142 MakeNewInstruction = true;
5143 break;
5144 }
5145 // Move on to the next filter.
5146 continue;
5147 }
5148 // At this point we know that both filters are ConstantArrays. Loop over
5149 // operands to see whether every element of Filter is also an element of
5150 // LFilter. Since filters tend to be short this is probably faster than
5151 // using a method that scales nicely.
5152 ConstantArray *FArray = cast<ConstantArray>(Filter);
5153 bool AllFound = true;
5154 for (unsigned f = 0; f != FElts; ++f) {
5155 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
5156 AllFound = false;
5157 for (unsigned l = 0; l != LElts; ++l) {
5158 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
5159 if (LTypeInfo == FTypeInfo) {
5160 AllFound = true;
5161 break;
5162 }
5163 }
5164 if (!AllFound)
5165 break;
5166 }
5167 if (AllFound) {
5168 // Discard LFilter.
5169 NewClauses.erase(J);
5170 MakeNewInstruction = true;
5171 }
5172 // Move on to the next filter.
5173 }
5174 }
5175
5176 // If we changed any of the clauses, replace the old landingpad instruction
5177 // with a new one.
5178 if (MakeNewInstruction) {
5179 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
5180 NewClauses.size());
5181 for (Constant *C : NewClauses)
5182 NLI->addClause(C);
5183 // A landing pad with no clauses must have the cleanup flag set. It is
5184 // theoretically possible, though highly unlikely, that we eliminated all
5185 // clauses. If so, force the cleanup flag to true.
5186 if (NewClauses.empty())
5187 CleanupFlag = true;
5188 NLI->setCleanup(CleanupFlag);
5189 return NLI;
5190 }
5191
5192 // Even if none of the clauses changed, we may nonetheless have understood
5193 // that the cleanup flag is pointless. Clear it if so.
5194 if (LI.isCleanup() != CleanupFlag) {
5195 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5196 LI.setCleanup(CleanupFlag);
5197 return &LI;
5198 }
5199
5200 return nullptr;
5201}
5202
5203 Value *
5204 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
5205 // Try to push freeze through instructions that propagate but don't produce
5206 // poison as far as possible. If an operand of freeze follows three
5207 // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
5208 // guaranteed-non-poison operands then push the freeze through to the one
5209 // operand that is not guaranteed non-poison. The actual transform is as
5210 // follows.
5211 // Op1 = ... ; Op1 can be poison
5212 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and exactly one
5213 // ; maybe-poison operand (Op1)
5214 // ... = Freeze(Op0)
5215 // =>
5216 // Op1 = ...
5217 // Op1.fr = Freeze(Op1)
5218 // ... = Inst(Op1.fr, NonPoisonOps...)
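// As a concrete sketch (hypothetical IR): for
//   %a = add nsw i32 %x, 1
//   %f = freeze i32 %a
// the freeze is pushed onto the only maybe-poison operand, and the
// poison-generating nsw flag is stripped:
//   %x.fr = freeze i32 %x
//   %a = add i32 %x.fr, 1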
5219 auto *OrigOp = OrigFI.getOperand(0);
5220 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
5221
5222 // While we could change the other users of OrigOp to use freeze(OrigOp), that
5223 // could reduce their optimization potential, so we only do this if
5224 // OrigOp is used solely by the freeze.
5225 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
5226 return nullptr;
5227
5228 // We can't push the freeze through an instruction which can itself create
5229 // poison. If the only source of new poison is flags, we can simply
5230 // strip them (since we know the only use is the freeze and nothing can
5231 // benefit from them.)
5232 if (canCreateUndefOrPoison(cast<Operator>(OrigOpInst),
5233 /*ConsiderFlagsAndMetadata*/ false))
5234 return nullptr;
5235
5236 // If operand is guaranteed not to be poison, there is no need to add freeze
5237 // to the operand. So we first find the operand that is not guaranteed to be
5238 // poison.
5239 Value *MaybePoisonOperand = nullptr;
5240 for (Value *V : OrigOpInst->operands()) {
5241 if (isGuaranteedNotToBeUndefOrPoison(V) ||
5242 // Treat identical operands as a single operand.
5243 (MaybePoisonOperand && MaybePoisonOperand == V))
5244 continue;
5245 if (!MaybePoisonOperand)
5246 MaybePoisonOperand = V;
5247 else
5248 return nullptr;
5249 }
5250
5251 OrigOpInst->dropPoisonGeneratingAnnotations();
5252
5253 // If all operands are guaranteed to be non-poison, we can drop freeze.
5254 if (!MaybePoisonOperand)
5255 return OrigOp;
5256
5257 Builder.SetInsertPoint(OrigOpInst);
5258 Value *FrozenMaybePoisonOperand = Builder.CreateFreeze(
5259 MaybePoisonOperand, MaybePoisonOperand->getName() + ".fr");
5260
5261 OrigOpInst->replaceUsesOfWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
5262 return OrigOp;
5263}
5264
5265 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5266 PHINode *PN) {
5267 // Detect whether this is a recurrence with a start value and some number of
5268 // backedge values. We'll check whether we can push the freeze through the
5269 // backedge values (possibly dropping poison flags along the way) until we
5270 // reach the phi again. In that case, we can move the freeze to the start
5271 // value.
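// As an illustration (hypothetical IR): for the recurrence
//   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw i32 %iv, 1
//   %f = freeze i32 %iv
// the freeze can instead be applied to %start in %entry (dropping nuw from
// the backedge add), and %f is then replaced by %iv itself.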
5272 Use *StartU = nullptr;
5273 SmallVector<Value *> Worklist;
5274 for (Use &U : PN->incoming_values()) {
5275 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5276 // Add backedge value to worklist.
5277 Worklist.push_back(U.get());
5278 continue;
5279 }
5280
5281 // Don't bother handling multiple start values.
5282 if (StartU)
5283 return nullptr;
5284 StartU = &U;
5285 }
5286
5287 if (!StartU || Worklist.empty())
5288 return nullptr; // Not a recurrence.
5289
5290 Value *StartV = StartU->get();
5291 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5292 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5293 // We can't insert freeze if the start value is the result of the
5294 // terminator (e.g. an invoke).
5295 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5296 return nullptr;
5297
5298 SmallPtrSet<Value *, 32> Visited;
5299 SmallVector<Instruction *> DropFlags;
5300 while (!Worklist.empty()) {
5301 Value *V = Worklist.pop_back_val();
5302 if (!Visited.insert(V).second)
5303 continue;
5304
5305 if (Visited.size() > 32)
5306 return nullptr; // Limit the total number of values we inspect.
5307
5308 // Assume that PN is non-poison, because it will be after the transform.
5309 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5310 continue;
5311
5312 Instruction *I = dyn_cast<Instruction>(V);
5313 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5314 /*ConsiderFlagsAndMetadata*/ false))
5315 return nullptr;
5316
5317 DropFlags.push_back(I);
5318 append_range(Worklist, I->operands());
5319 }
5320
5321 for (Instruction *I : DropFlags)
5322 I->dropPoisonGeneratingAnnotations();
5323
5324 if (StartNeedsFreeze) {
5325 Builder.SetInsertPoint(StartBB->getTerminator());
5326 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5327 StartV->getName() + ".fr");
5328 replaceUse(*StartU, FrozenStartV);
5329 }
5330 return replaceInstUsesWith(FI, PN);
5331}
5332
5333 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5334 Value *Op = FI.getOperand(0);
5335
5336 if (isa<Constant>(Op) || Op->hasOneUse())
5337 return false;
5338
5339 // Move the freeze directly after the definition of its operand, so that
5340 // it dominates the maximum number of uses. Note that it may not dominate
5341 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5342 // the normal/default destination. This is why the domination check in the
5343 // replacement below is still necessary.
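// As an illustration (hypothetical IR): given
//   %a = add i32 %x, %y
//   store i32 %a, ptr %p
//   %f = freeze i32 %a
// the freeze is hoisted to directly after %a, and the store is rewritten to
// use %f, so a single frozen value dominates all the uses it can reach.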
5344 BasicBlock::iterator MoveBefore;
5345 if (isa<Argument>(Op)) {
5346 MoveBefore =
5347 FI.getFunction()->getEntryBlock().getFirstInsertionPt();
5348 } else {
5349 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5350 if (!MoveBeforeOpt)
5351 return false;
5352 MoveBefore = *MoveBeforeOpt;
5353 }
5354
5355 // Re-point iterator to come after any debug-info records.
5356 MoveBefore.setHeadBit(false);
5357
5358 bool Changed = false;
5359 if (&FI != &*MoveBefore) {
5360 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5361 Changed = true;
5362 }
5363
5364 SmallVector<User *> Users;
5365 Changed |= Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5366 if (!DT.dominates(&FI, U))
5367 return false;
5368
5369 Users.push_back(U.getUser());
5370 return true;
5371 });
5372
5373 for (auto *U : Users) {
5374 for (auto &AssumeVH : AC.assumptionsFor(U)) {
5375 if (!AssumeVH)
5376 continue;
5377 AC.updateAffectedValues(cast<AssumeInst>(AssumeVH));
5378 }
5379 }
5380
5381 return Changed;
5382}
5383
5384// Check if any direct or bitcast user of this value is a shuffle instruction.
5385 static bool isUsedWithinShuffleVector(Value *V) {
5386 for (auto *U : V->users()) {
5387 if (isa<ShuffleVectorInst>(U))
5388 return true;
5389 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5390 return true;
5391 }
5392 return false;
5393}
5394
5395 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5396 Value *Op0 = I.getOperand(0);
5397
5398 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5399 return replaceInstUsesWith(I, V);
5400
5401 // freeze (phi const, x) --> phi const, (freeze x)
5402 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5403 if (Instruction *NV = foldOpIntoPhi(I, PN))
5404 return NV;
5405 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5406 return NV;
5407 }
5408
5409 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5410 return replaceInstUsesWith(I, NI);
5411
5412 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5413 // - or: pick -1
5414 // - select's condition: if the true value is constant, choose it by making
5415 // the condition true.
5416 // - phi: pick the common constant across operands
5417 // - default: pick 0
5418 //
5419 // Note that this transform is intentionally done here rather than
5420 // via an analysis in InstSimplify or at individual user sites. That is
5421 // because we must produce the same value for all uses of the freeze -
5422 // it's the reason "freeze" exists!
5423 //
5424 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5425 // duplicating logic for binops at least.
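// As an illustration (hypothetical IR): in
//   %f = freeze i32 undef
//   %r = or i32 %f, %x
// picking -1 for %f lets the 'or' fold to the constant -1, since -1 is the
// absorbing element of 'or'.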
5426 auto getUndefReplacement = [&](Type *Ty) {
5427 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5428 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5429 // removed.
5430 Constant *BestValue = nullptr;
5431 for (Value *V : PN.incoming_values()) {
5432 if (match(V, m_Freeze(m_Undef())))
5433 continue;
5434
5435 Constant *C = dyn_cast<Constant>(V);
5436 if (!C)
5437 return nullptr;
5438
5439 if (C->containsUndefOrPoisonElement())
5440 return nullptr;
5441
5442 if (BestValue && BestValue != C)
5443 return nullptr;
5444
5445 BestValue = C;
5446 }
5447 return BestValue;
5448 };
5449
5450 Value *NullValue = Constant::getNullValue(Ty);
5451 Value *BestValue = nullptr;
5452 for (auto *U : I.users()) {
5453 Value *V = NullValue;
5454 if (match(U, m_Or(m_Value(), m_Value())))
5455 V = ConstantInt::getAllOnesValue(Ty);
5456 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5457 V = ConstantInt::getTrue(Ty);
5458 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5459 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5460 V = NullValue;
5461 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5462 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5463 V = MaybeV;
5464 }
5465
5466 if (!BestValue)
5467 BestValue = V;
5468 else if (BestValue != V)
5469 BestValue = NullValue;
5470 }
5471 assert(BestValue && "Must have at least one use");
5472 assert(BestValue != &I && "Cannot replace with itself");
5473 return BestValue;
5474 };
5475
5476 if (match(Op0, m_Undef())) {
5477 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5478 // a shuffle. This may improve codegen for shuffles that allow
5479 // unspecified inputs.
5480 if (isUsedWithinShuffleVector(&I))
5481 return nullptr;
5482 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5483 }
5484
5485 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5486 Type *Ty = C->getType();
5487 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5488 if (!VTy)
5489 return nullptr;
5490 unsigned NumElts = VTy->getNumElements();
5491 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5492 for (unsigned i = 0; i != NumElts; ++i) {
5493 Constant *EltC = C->getAggregateElement(i);
5494 if (EltC && !match(EltC, m_Undef())) {
5495 BestValue = EltC;
5496 break;
5497 }
5498 }
5499 return Constant::replaceUndefsWith(C, BestValue);
5500 };
5501
5502 Constant *C;
5503 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5504 !C->containsConstantExpression()) {
5505 if (Constant *Repl = getFreezeVectorReplacement(C))
5506 return replaceInstUsesWith(I, Repl);
5507 }
5508
5509 // Replace uses of Op with freeze(Op).
5510 if (freezeOtherUses(I))
5511 return &I;
5512
5513 return nullptr;
5514}
5515
5516 /// Check for the case where the call writes to an otherwise dead alloca. This
5517/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5518/// helper *only* analyzes the write; doesn't check any other legality aspect.
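/// As an illustration (hypothetical C++): in
///   int exp;                // never read afterwards
///   (void)frexp(val, &exp); // call writes only to the dead 'exp' slot
/// the only memory the call writes is an otherwise-unused alloca, so the
/// write cannot be observed.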
5519 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5520 auto *CB = dyn_cast<CallBase>(I);
5521 if (!CB)
5522 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5523 // to allow reload along used path as described below. Otherwise, this
5524 // is simply a store to a dead allocation which will be removed.
5525 return false;
5526 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5527 if (!Dest)
5528 return false;
5529 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5530 if (!AI)
5531 // TODO: allow malloc?
5532 return false;
5533 // TODO: allow memory access dominated by move point? Note that since AI
5534 // could have a reference to itself captured by the call, we would need to
5535 // account for cycles in doing so.
5536 SmallVector<const User *> AllocaUsers;
5537 SmallPtrSet<const User *, 4> Visited;
5538 auto pushUsers = [&](const Instruction &I) {
5539 for (const User *U : I.users()) {
5540 if (Visited.insert(U).second)
5541 AllocaUsers.push_back(U);
5542 }
5543 };
5544 pushUsers(*AI);
5545 while (!AllocaUsers.empty()) {
5546 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5547 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5548 pushUsers(*UserI);
5549 continue;
5550 }
5551 if (UserI == CB)
5552 continue;
5553 // TODO: support lifetime.start/end here
5554 return false;
5555 }
5556 return true;
5557}
5558
5559/// Try to move the specified instruction from its current block into the
5560/// beginning of DestBlock, which can only happen if it's safe to move the
5561/// instruction past all of the instructions between it and the end of its
5562/// block.
5563 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5564 BasicBlock *DestBlock) {
5565 BasicBlock *SrcBlock = I->getParent();
5566
5567 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5568 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5569 I->isTerminator())
5570 return false;
5571
5572 // Do not sink static or dynamic alloca instructions. Static allocas must
5573 // remain in the entry block, and dynamic allocas must not be sunk in between
5574 // a stacksave / stackrestore pair, which would incorrectly shorten its
5575 // lifetime.
5576 if (isa<AllocaInst>(I))
5577 return false;
5578
5579 // Do not sink into catchswitch blocks.
5580 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5581 return false;
5582
5583 // Do not sink convergent call instructions.
5584 if (auto *CI = dyn_cast<CallInst>(I)) {
5585 if (CI->isConvergent())
5586 return false;
5587 }
5588
5589 // Unless we can prove that the memory write isn't visible except on the
5590 // path we're sinking to, we must bail.
5591 if (I->mayWriteToMemory()) {
5592 if (!SoleWriteToDeadLocal(I, TLI))
5593 return false;
5594 }
5595
5596 // We can only sink load instructions if there is nothing between the load and
5597 // the end of block that could change the value.
5598 if (I->mayReadFromMemory() &&
5599 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5600 // We don't want to do any sophisticated alias analysis, so we only check
5601 // the instructions after I in I's parent block if we try to sink to its
5602 // successor block.
5603 if (DestBlock->getUniquePredecessor() != I->getParent())
5604 return false;
5605 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5606 E = I->getParent()->end();
5607 Scan != E; ++Scan)
5608 if (Scan->mayWriteToMemory())
5609 return false;
5610 }
5611
5612 I->dropDroppableUses([&](const Use *U) {
5613 auto *I = dyn_cast<Instruction>(U->getUser());
5614 if (I && I->getParent() != DestBlock) {
5615 Worklist.add(I);
5616 return true;
5617 }
5618 return false;
5619 });
5620 /// FIXME: We could remove droppable uses that are not dominated by
5621 /// the new position.
5622
5623 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5624 I->moveBefore(*DestBlock, InsertPos);
5625 ++NumSunkInst;
5626
5627 // Also sink all related debug uses from the source basic block. Otherwise we
5628 // get debug use before the def. Attempt to salvage debug uses first, to
5629 // maximise the range variables have location for. If we cannot salvage, then
5630 // mark the location undef: we know it was supposed to receive a new location
5631 // here, but that computation has been sunk.
5632 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5633 findDbgUsers(I, DbgVariableRecords);
5634 if (!DbgVariableRecords.empty())
5635 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5636 DbgVariableRecords);
5637
5638 // PS: there are numerous flaws with this behaviour, not least that right now
5639 // assignments can be re-ordered past other assignments to the same variable
5640 // if they use different Values. Creating more undef assignments can never be
5641 // undone. And salvaging all users outside of this block can unnecessarily
5642 // alter the lifetime of the live-value that the variable refers to.
5643 // Some of these things can be resolved by tolerating debug use-before-defs in
5644 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5645 // being used for more architectures.
5646
5647 return true;
5648}
5649
5650 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5651 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5652 BasicBlock *DestBlock,
5653 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5654 // For all debug values in the destination block, the sunk instruction
5655 // will still be available, so they do not need to be dropped.
5656
5657 // Fetch all DbgVariableRecords not already in the destination.
5658 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5659 for (auto &DVR : DbgVariableRecords)
5660 if (DVR->getParent() != DestBlock)
5661 DbgVariableRecordsToSalvage.push_back(DVR);
5662
5663 // Fetch a second collection, of DbgVariableRecords in the source block that
5664 // we're going to sink.
5665 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5666 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5667 if (DVR->getParent() == SrcBlock)
5668 DbgVariableRecordsToSink.push_back(DVR);
5669
5670 // Sort DbgVariableRecords according to their position in the block. This is a
5671 // partial order: DbgVariableRecords attached to different instructions will
5672 // be ordered by the instruction order, but DbgVariableRecords attached to the
5673 // same instruction won't have an order.
5674 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5675 return B->getInstruction()->comesBefore(A->getInstruction());
5676 };
5677 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5678
5679 // If there are two assignments to the same variable attached to the same
5680 // instruction, the ordering between the two assignments is important. Scan
5681 // for this (rare) case and establish which is the last assignment.
5682 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5683 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5684 if (DbgVariableRecordsToSink.size() > 1) {
5685 SmallDenseMap<InstVarPair, unsigned> CountMap;
5686 // Count how many assignments to each variable there are per instruction.
5687 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5688 DebugVariable DbgUserVariable =
5689 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5690 DVR->getDebugLoc()->getInlinedAt());
5691 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5692 }
5693
5694 // If there are any instructions with two assignments, add them to the
5695 // FilterOutMap to record that they need extra filtering.
5696 SmallPtrSet<const Instruction *, 4> DupSet;
5697 for (auto It : CountMap) {
5698 if (It.second > 1) {
5699 FilterOutMap[It.first] = nullptr;
5700 DupSet.insert(It.first.first);
5701 }
5702 }
5703
5704 // For all instruction/variable pairs needing extra filtering, find the
5705 // latest assignment.
5706 for (const Instruction *Inst : DupSet) {
5707 for (DbgVariableRecord &DVR :
5708 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5709 DebugVariable DbgUserVariable =
5710 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5711 DVR.getDebugLoc()->getInlinedAt());
5712 auto FilterIt =
5713 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5714 if (FilterIt == FilterOutMap.end())
5715 continue;
5716 if (FilterIt->second != nullptr)
5717 continue;
5718 FilterIt->second = &DVR;
5719 }
5720 }
5721 }
5722
5723 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5724 // out any duplicate assignments identified above.
5725 SmallVector<DbgVariableRecord *, 2> DVRClones;
5726 SmallSet<DebugVariable, 4> SunkVariables;
5727 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5728 if (DVR->Type == DbgVariableRecord::LocationType::Declare)
5729 continue;
5730
5731 DebugVariable DbgUserVariable =
5732 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5733 DVR->getDebugLoc()->getInlinedAt());
5734
5735 // For any variable where there were multiple assignments in the same place,
5736 // ignore all but the last assignment.
5737 if (!FilterOutMap.empty()) {
5738 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5739 auto It = FilterOutMap.find(IVP);
5740
5741 // Filter out.
5742 if (It != FilterOutMap.end() && It->second != DVR)
5743 continue;
5744 }
5745
5746 if (!SunkVariables.insert(DbgUserVariable).second)
5747 continue;
5748
5749 if (DVR->isDbgAssign())
5750 continue;
5751
5752 DVRClones.emplace_back(DVR->clone());
5753 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5754 }
5755
5756 // Perform salvaging without the clones, then sink the clones.
5757 if (DVRClones.empty())
5758 return;
5759
5760 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5761
5762 // The clones are in reverse order of original appearance. Assert that the
5763 // head bit is set on the iterator as we _should_ have received it via
5764 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5765 // we'll repeatedly insert at the head, such as:
5766 // DVR-3 (third insertion goes here)
5767 // DVR-2 (second insertion goes here)
5768 // DVR-1 (first insertion goes here)
5769 // Any-Prior-DVRs
5770 // InsertPtInst
5771 assert(InsertPos.getHeadBit());
5772 for (DbgVariableRecord *DVRClone : DVRClones) {
5773 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5774 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5775 }
5776}
5777
5778 bool InstCombinerImpl::run() {
5779 while (!Worklist.isEmpty()) {
5780 // Walk deferred instructions in reverse order, and push them to the
5781 // worklist, which means they'll end up popped from the worklist in-order.
5782 while (Instruction *I = Worklist.popDeferred()) {
5783 // Check to see if we can DCE the instruction. We do this already here to
5784 // reduce the number of uses and thus allow other folds to trigger.
5785 // Note that eraseInstFromFunction() may push additional instructions on
5786 // the deferred worklist, so this will DCE whole instruction chains.
5787 if (isInstructionTriviallyDead(I, &TLI)) {
5788 eraseInstFromFunction(*I);
5789 ++NumDeadInst;
5790 continue;
5791 }
5792
5793 Worklist.push(I);
5794 }
5795
5796 Instruction *I = Worklist.removeOne();
5797 if (I == nullptr) continue; // skip null values.
5798
5799 // Check to see if we can DCE the instruction.
5800 if (isInstructionTriviallyDead(I, &TLI)) {
5801 eraseInstFromFunction(*I);
5802 ++NumDeadInst;
5803 continue;
5804 }
5805
5806 if (!DebugCounter::shouldExecute(VisitCounter))
5807 continue;
5808
5809 // See if we can trivially sink this instruction to its user if we can
5810 // prove that the successor is not executed more frequently than our block.
5811 // Return the UserBlock if successful.
5812 auto getOptionalSinkBlockForInst =
5813 [this](Instruction *I) -> std::optional<BasicBlock *> {
5814 if (!EnableCodeSinking)
5815 return std::nullopt;
5816
5817 BasicBlock *BB = I->getParent();
5818 BasicBlock *UserParent = nullptr;
5819 unsigned NumUsers = 0;
5820
5821 for (Use &U : I->uses()) {
5822 User *User = U.getUser();
5823 if (User->isDroppable()) {
5824 // Do not sink if there are dereferenceable assumes that would be
5825 // removed.
5826 auto *II = cast<IntrinsicInst>(User);
5827 if (II->getIntrinsicID() != Intrinsic::assume ||
5828 !II->getOperandBundle("dereferenceable"))
5829 continue;
5830 }
5831
5832 if (NumUsers > MaxSinkNumUsers)
5833 return std::nullopt;
5834
5835 Instruction *UserInst = cast<Instruction>(User);
5836 // Special handling for Phi nodes - get the block the use occurs in.
5837 BasicBlock *UserBB = UserInst->getParent();
5838 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5839 UserBB = PN->getIncomingBlock(U);
5840 // Bail out if we have uses in different blocks. We don't do any
5841 // sophisticated analysis (i.e., finding NearestCommonDominator of these
5842 // use blocks).
5843 if (UserParent && UserParent != UserBB)
5844 return std::nullopt;
5845 UserParent = UserBB;
5846
5847 // Make sure these checks are done only once; naturally, we do them the
5848 // first time we get the user parent, which saves compile time.
5849 if (NumUsers == 0) {
5850 // Try sinking to another block. If that block is unreachable, then do
5851 // not bother. SimplifyCFG should handle it.
5852 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5853 return std::nullopt;
5854
5855 auto *Term = UserParent->getTerminator();
5856 // See if the user is one of our successors that has only one
5857 // predecessor, so that we don't have to split the critical edge.
5858 // Another option where we can sink is a block that ends with a
5859 // terminator that does not pass control to other block (such as
5860 // return or unreachable or resume). In this case:
5861 // - I dominates the User (by SSA form);
5862 // - the User will be executed at most once.
5863 // So sinking I down to User is always profitable or neutral.
5864 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5865 return std::nullopt;
5866
5867 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5868 }
5869
5870 NumUsers++;
5871 }
5872
5873 // No users, or only droppable users.
5874 if (!UserParent)
5875 return std::nullopt;
5876
5877 return UserParent;
5878 };
5879
5880 auto OptBB = getOptionalSinkBlockForInst(I);
5881 if (OptBB) {
5882 auto *UserParent = *OptBB;
5883 // Okay, the CFG is simple enough, try to sink this instruction.
5884 if (tryToSinkInstruction(I, UserParent)) {
5885 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5886 MadeIRChange = true;
5887 // We'll add uses of the sunk instruction below, but since
5888 // sinking can expose opportunities for its *operands*, add
5889 // them to the worklist
5890 for (Use &U : I->operands())
5891 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5892 Worklist.push(OpI);
5893 }
5894 }
5895
5896 // Now that we have an instruction, try combining it to simplify it.
5897 Builder.SetInsertPoint(I);
5898 Builder.CollectMetadataToCopy(
5899 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5900
5901#ifndef NDEBUG
5902 std::string OrigI;
5903#endif
5904 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5905 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5906
5907 if (Instruction *Result = visit(*I)) {
5908 ++NumCombined;
5909 // Should we replace the old instruction with a new one?
5910 if (Result != I) {
5911 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5912 << " New = " << *Result << '\n');
5913
5914 // We copy the old instruction's DebugLoc to the new instruction, unless
5915 // InstCombine already assigned a DebugLoc to it, in which case we
5916 // should trust the more specifically selected DebugLoc.
5917 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5918 // We also copy annotation metadata to the new instruction.
5919 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5920 // Everything uses the new instruction now.
5921 I->replaceAllUsesWith(Result);
5922
5923 // Move the name to the new instruction first.
5924 Result->takeName(I);
5925
5926 // Insert the new instruction into the basic block...
5927 BasicBlock *InstParent = I->getParent();
5928 BasicBlock::iterator InsertPos = I->getIterator();
5929
5930 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5931 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5932 // We need to fix up the insertion point.
5933 if (isa<PHINode>(I)) // PHI -> Non-PHI
5934 InsertPos = InstParent->getFirstInsertionPt();
5935 else // Non-PHI -> PHI
5936 InsertPos = InstParent->getFirstNonPHIIt();
5937 }
5938
5939 Result->insertInto(InstParent, InsertPos);
5940
5941 // Push the new instruction and any users onto the worklist.
5942 Worklist.pushUsersToWorkList(*Result);
5943 Worklist.push(Result);
5944
5945 eraseInstFromFunction(*I);
5946 } else {
5947 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5948 << " New = " << *I << '\n');
5949
5950 // If the instruction was modified, it's possible that it is now dead.
5951 // If so, remove it.
5952 if (isInstructionTriviallyDead(I, &TLI)) {
5953 eraseInstFromFunction(*I);
5954 } else {
5955 Worklist.pushUsersToWorkList(*I);
5956 Worklist.push(I);
5957 }
5958 }
5959 MadeIRChange = true;
5960 }
5961 }
5962
5963 Worklist.zap();
5964 return MadeIRChange;
5965}
5966
5967// Track the scopes used by !alias.scope and !noalias. In a function, a
5968// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5969// by both sets. If not, the declaration of the scope can be safely omitted.
5970// The MDNode of the scope can be omitted as well for the instructions that are
5971// part of this function. We do not do that at this point, as this might become
5972// too time consuming to do.
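// As an illustration (hypothetical metadata): a call to
//   @llvm.experimental.noalias.scope.decl(metadata !2)
// is removable unless scope !2 is referenced both by some !alias.scope list
// and by some !noalias list within the function.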
5973 class AliasScopeTracker {
5974 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5975 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5976
5977public:
5978 void analyse(Instruction *I) {
5979 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5980 if (!I->hasMetadataOtherThanDebugLoc())
5981 return;
5982
5983 auto Track = [](Metadata *ScopeList, auto &Container) {
5984 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5985 if (!MDScopeList || !Container.insert(MDScopeList).second)
5986 return;
5987 for (const auto &MDOperand : MDScopeList->operands())
5988 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5989 Container.insert(MDScope);
5990 };
5991
5992 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5993 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5994 }
5995
5996 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5997 const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5998 if (!Decl)
5999 return false;
6000
6001 assert(Decl->use_empty() &&
6002 "llvm.experimental.noalias.scope.decl in use ?");
6003 const MDNode *MDSL = Decl->getScopeList();
6004 assert(MDSL->getNumOperands() == 1 &&
6005 "llvm.experimental.noalias.scope should refer to a single scope");
6006 auto &MDOperand = MDSL->getOperand(0);
6007 if (auto *MD = dyn_cast<MDNode>(MDOperand))
6008 return !UsedAliasScopesAndLists.contains(MD) ||
6009 !UsedNoAliasScopesAndLists.contains(MD);
6010
6011 // Not an MDNode? Throw it away.
6012 return true;
6013 }
6014};
6015
6016/// Populate the IC worklist from a function, by walking it in reverse
6017/// post-order and adding all reachable code to the worklist.
6018///
6019/// This has a couple of tricks to make the code faster and more powerful. In
6020/// particular, we constant fold and DCE instructions as we go, to avoid adding
6021/// them to the worklist (this significantly speeds up instcombine on code where
6022/// many instructions are dead or constant). Additionally, if we find a branch
6023/// whose condition is a known constant, we only visit the reachable successors.
6024 bool InstCombinerImpl::prepareWorklist(Function &F) {
6025 bool MadeIRChange = false;
6026 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
6027 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
6028 DenseMap<Constant *, Constant *> FoldedConstants;
6029 AliasScopeTracker SeenAliasScopes;
6030
6031 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
6032 for (BasicBlock *Succ : successors(BB))
6033 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
6034 for (PHINode &PN : Succ->phis())
6035 for (Use &U : PN.incoming_values())
6036 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
6037 U.set(PoisonValue::get(PN.getType()));
6038 MadeIRChange = true;
6039 }
6040 };
6041
6042 for (BasicBlock *BB : RPOT) {
6043 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
6044 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
6045 })) {
6046 HandleOnlyLiveSuccessor(BB, nullptr);
6047 continue;
6048 }
6049 LiveBlocks.insert(BB);
6050
6051 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
6052 // ConstantProp instruction if trivially constant.
6053 if (!Inst.use_empty() &&
6054 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
6055 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
6056 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
6057 << '\n');
6058 Inst.replaceAllUsesWith(C);
6059 ++NumConstProp;
6060 if (isInstructionTriviallyDead(&Inst, &TLI))
6061 Inst.eraseFromParent();
6062 MadeIRChange = true;
6063 continue;
6064 }
6065
6066 // See if we can constant fold its operands.
6067 for (Use &U : Inst.operands()) {
6068 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
6069 continue;
6070
6071 auto *C = cast<Constant>(U);
6072 Constant *&FoldRes = FoldedConstants[C];
6073 if (!FoldRes)
6074 FoldRes = ConstantFoldConstant(C, DL, &TLI);
6075
6076 if (FoldRes != C) {
6077 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
6078 << "\n Old = " << *C
6079 << "\n New = " << *FoldRes << '\n');
6080 U = FoldRes;
6081 MadeIRChange = true;
6082 }
6083 }
6084
6085 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
6086 // these call instructions consumes non-trivial amount of time and
6087 // provides no value for the optimization.
6088 if (!Inst.isDebugOrPseudoInst()) {
6089 InstrsForInstructionWorklist.push_back(&Inst);
6090 SeenAliasScopes.analyse(&Inst);
6091 }
6092 }
6093
6094 // If this is a branch or switch on a constant, mark only the single
6095 // live successor. Otherwise assume all successors are live.
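// As an illustration (hypothetical IR): for
//   br i1 true, label %then, label %else
// only %then is marked live; the edge to %else is recorded as dead, and any
// phi inputs in %else coming along this edge are replaced with poison.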
6096 Instruction *TI = BB->getTerminator();
6097 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
6098 if (isa<UndefValue>(BI->getCondition())) {
6099 // Branch on undef is UB.
6100 HandleOnlyLiveSuccessor(BB, nullptr);
6101 continue;
6102 }
6103 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
6104 bool CondVal = Cond->getZExtValue();
6105 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6106 continue;
6107 }
6108 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
6109 if (isa<UndefValue>(SI->getCondition())) {
6110 // Switch on undef is UB.
6111 HandleOnlyLiveSuccessor(BB, nullptr);
6112 continue;
6113 }
6114 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
6115 HandleOnlyLiveSuccessor(BB,
6116 SI->findCaseValue(Cond)->getCaseSuccessor());
6117 continue;
6118 }
6119 }
6120 }
6121
6122 // Remove instructions inside unreachable blocks. This prevents the
6123 // instcombine code from having to deal with some bad special cases, and
6124 // reduces use counts of instructions.
6125 for (BasicBlock &BB : F) {
6126 if (LiveBlocks.count(&BB))
6127 continue;
6128
6129 unsigned NumDeadInstInBB;
6130 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
6131
6132 MadeIRChange |= NumDeadInstInBB != 0;
6133 NumDeadInst += NumDeadInstInBB;
6134 }
6135
6136 // Once we've found all of the instructions to add to instcombine's worklist,
6137 // add them in reverse order. This way instcombine will visit from the top
6138 // of the function down. This jives well with the way that it adds all uses
6139 // of instructions to the worklist after doing a transformation, thus avoiding
6140 // some N^2 behavior in pathological cases.
6141 Worklist.reserve(InstrsForInstructionWorklist.size());
6142 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
6143 // DCE instruction if trivially dead. As we iterate in reverse program
6144 // order here, we will clean up whole chains of dead instructions.
6145 if (isInstructionTriviallyDead(Inst, &TLI) ||
6146 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
6147 ++NumDeadInst;
6148 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
6149 salvageDebugInfo(*Inst);
6150 Inst->eraseFromParent();
6151 MadeIRChange = true;
6152 continue;
6153 }
6154
6155 Worklist.push(Inst);
6156 }
6157
6158 return MadeIRChange;
6159}
6160
6161 void InstCombinerImpl::computeBackEdges() {
6162 // Collect backedges.
6163 SmallVector<bool> Visited(F.getMaxBlockNumber());
6164 for (BasicBlock *BB : RPOT) {
6165 Visited[BB->getNumber()] = true;
6166 for (BasicBlock *Succ : successors(BB))
6167 if (Visited[Succ->getNumber()])
6168 BackEdges.insert({BB, Succ});
6169 }
6170 ComputedBackEdges = true;
6171}
6172
6173 static bool combineInstructionsOverFunction(
6174 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
6175 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
6176 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
6177 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
6178 const InstCombineOptions &Opts) {
6179 auto &DL = F.getDataLayout();
6180 bool VerifyFixpoint = Opts.VerifyFixpoint &&
6181 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
6182
6183 /// Builder - This is an IRBuilder that automatically inserts new
6184 /// instructions into the worklist when they are created.
6185 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
6186 F.getContext(), TargetFolder(DL),
6187 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
6188 Worklist.add(I);
6189 if (auto *Assume = dyn_cast<AssumeInst>(I))
6190 AC.registerAssumption(Assume);
6191 }));
6192
6193 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
6194
6195 // Lower dbg.declare intrinsics otherwise their value may be clobbered
6196 // by instcombiner.
6197 bool MadeIRChange = false;
6198 if (ShouldLowerDbgDeclare)
6199 MadeIRChange = LowerDbgDeclare(F);
6200
6201 // Iterate while there is work to do.
6202 unsigned Iteration = 0;
6203 while (true) {
6204 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
6205 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
6206 << " on " << F.getName()
6207 << " reached; stopping without verifying fixpoint\n");
6208 break;
6209 }
6210
6211 ++Iteration;
6212 ++NumWorklistIterations;
6213 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6214 << F.getName() << "\n");
6215
6216 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
6217 BPI, PSI, DL, RPOT);
6218 IC.MaxArraySizeForCombine = MaxArraySize;
6219 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6220 MadeChangeInThisIteration |= IC.run();
6221 if (!MadeChangeInThisIteration)
6222 break;
6223
6224 MadeIRChange = true;
6225 if (Iteration > Opts.MaxIterations) {
6227 "Instruction Combining on " + Twine(F.getName()) +
6228 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6229 " iterations. " +
6230 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6231 "'instcombine-no-verify-fixpoint' to suppress this error.");
6232 }
6233 }
6234
6235 if (Iteration == 1)
6236 ++NumOneIteration;
6237 else if (Iteration == 2)
6238 ++NumTwoIterations;
6239 else if (Iteration == 3)
6240 ++NumThreeIterations;
6241 else
6242 ++NumFourOrMoreIterations;
6243
6244 return MadeIRChange;
6245}
6246
6247 InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
6248
6249 void InstCombinePass::printPipeline(
6250 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6251 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6252 OS, MapClassName2PassName);
6253 OS << '<';
6254 OS << "max-iterations=" << Options.MaxIterations << ";";
6255 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6256 OS << '>';
6257}
6258
6259char InstCombinePass::ID = 0;
6260
6261 PreservedAnalyses InstCombinePass::run(Function &F,
6262 FunctionAnalysisManager &AM) {
6263 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6264 // No changes since last InstCombine pass, exit early.
6265 if (LRT.shouldSkip(&ID))
6266 return PreservedAnalyses::all();
6267
6268 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6269 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6270 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6271 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6272 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6273
6274 auto *AA = &AM.getResult<AAManager>(F);
6275 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6276 ProfileSummaryInfo *PSI =
6277 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6278 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6279 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6280 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
6281
6282 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6283 BFI, BPI, PSI, Options)) {
6284 // No changes, all analyses are preserved.
6285 LRT.update(&ID, /*Changed=*/false);
6286 return PreservedAnalyses::all();
6287 }
6288
6289 // Mark all the analyses that instcombine updates as preserved.
6290 PreservedAnalyses PA;
6291 LRT.update(&ID, /*Changed=*/true);
6292 PA.preserveSet<CFGAnalyses>();
6293 PA.preserve<LastRunTrackingAnalysis>();
6294 return PA;
6295}
6296
6297 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
6298 AU.setPreservesCFG();
6299 AU.addRequired<AAResultsWrapperPass>();
6300 AU.addRequired<TargetLibraryInfoWrapperPass>();
6301 AU.addRequired<TargetTransformInfoWrapperPass>();
6302 AU.addRequired<AssumptionCacheTracker>();
6303 AU.addRequired<DominatorTreeWrapperPass>();
6304 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
6305 AU.addPreserved<DominatorTreeWrapperPass>();
6306 AU.addPreserved<AAResultsWrapperPass>();
6307 AU.addPreserved<BasicAAWrapperPass>();
6308 AU.addPreserved<GlobalsAAWrapperPass>();
6309 AU.addRequired<ProfileSummaryInfoWrapperPass>();
6310 AU.addPreserved<LazyBlockFrequencyInfoPass>();
6311 }
6312
6313 bool InstructionCombiningPass::runOnFunction(Function &F) {
6314 if (skipFunction(F))
6315 return false;
6316
6317 // Required analyses.
6318 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6319 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6320 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6321 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6322 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6323 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6324
6325 // Optional analyses.
6326 ProfileSummaryInfo *PSI =
6327 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6328 BlockFrequencyInfo *BFI =
6329 (PSI && PSI->hasProfileSummary()) ?
6330 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6331 nullptr;
6332 BranchProbabilityInfo *BPI = nullptr;
6333 if (auto *WrapperPass =
6334 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
6335 BPI = &WrapperPass->getBPI();
6336
6337 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6338 BFI, BPI, PSI, InstCombineOptions());
6339}
6340
6341 char InstructionCombiningPass::ID = 0;
6342
6343 InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {}
6344
6345 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
6346 "Combine redundant instructions", false, false)
6347 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6348 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
6349 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6350 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6351 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6352 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6353 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6354 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
6355 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6356 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
6357 "Combine redundant instructions", false, false)
6358
6359 // Initialization Routines.
6360 void llvm::initializeInstCombine(PassRegistry &Registry) {
6361 initializeInstructionCombiningPassPass(Registry);
6362 }
6363
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file contains the declarations for metadata subclasses.
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
The Input class is used to parse a yaml document into in-memory structs and vectors.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:214
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1809
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1941
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:967
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1979
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:2011
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1992
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:219
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:482
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Conditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
ConstantArray - Constant Array Declarations.
Definition Constants.h:576
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:932
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
Definition Constants.h:660
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:316
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:809
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2084
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:544
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2847
This instruction inserts a struct field of array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * visitCondBrInst(CondBrInst &BI)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, const SimplifyQuery &Q, unsigned Depth=0)
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * visitUncondBrInst(UncondBrInst &BI)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1654
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:116
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:310
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:110
Unconditional Branch instruction.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:254
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:737
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:146
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool use_empty() const
Definition Value.h:346
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:890
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
auto m_Poison()
Match an arbitrary poison constant.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_Constant()
Match an arbitrary Constant and ignore it.
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SelectLike_match< CondTy, LTy, RTy > m_SelectLike(const CondTy &C, const LTy &TrueC, const RTy &FalseC)
Matches a value that behaves like a boolean-controlled select, i.e.
auto m_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:557
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2115
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition LoopInfo.cpp:60
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2500
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:153
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1682
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:633
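A sketch of the standard pattern (the cleanup loop here is hypothetical): make_early_inc_range lets the loop body erase the current instruction without invalidating the iterator:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/Transforms/Utils/Local.h"

  // Delete trivially dead instructions from BB while iterating over it.
  void sweepDeadInsts(llvm::BasicBlock &BB) {
    for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
      if (llvm::isInstructionTriviallyDead(&I))
        I.eraseFromParent();
  }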
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1640
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
Definition Local.cpp:2483
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:204
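For a power of two, countr_zero is exactly log2, which is how a multiply by a power-of-two constant yields a shift amount (a sketch; the helper name is hypothetical):

  #include "llvm/ADT/bit.h"
  #include <cstdint>

  // Shift amount replacing `mul X, C` when C is a power of two; -1 otherwise.
  int shiftAmountForMul(uint64_t C) {
    if (!llvm::has_single_bit(C))
      return -1;
    return llvm::countr_zero(C); // log2(C) for powers of two.
  }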
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start, Entry], [Inc, backedge]; Inc = binop iv, Step (or Inc = binop Step, iv).
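A usage sketch (the caller and helper name are hypothetical): on success the out-parameters receive the recurrence's binary operator, start value, and step:

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"

  // True if P is the induction PHI of a simple first-order recurrence.
  bool isSimpleRecurrencePhi(llvm::PHINode *P) {
    llvm::BinaryOperator *BO;
    llvm::Value *Start, *Step;
    return llvm::matchSimpleRecurrence(P, BO, Start, Step);
  }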
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
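A sketch of the intended calling convention (names are illustrative): try the fold first, and only materialize a new instruction if the simplifier returns null:

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/IRBuilder.h"

  // Return a simplified form of X + Y if one exists, else build the add.
  llvm::Value *buildAdd(llvm::IRBuilder<> &B, llvm::Value *X, llvm::Value *Y,
                        const llvm::SimplifyQuery &Q) {
    if (llvm::Value *V = llvm::simplifyAddInst(X, Y, /*IsNSW=*/false,
                                               /*IsNUW=*/false, Q))
      return V;
    return B.CreateAdd(X, Y);
  }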
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:149
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1810
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg.declare record.
Definition Local.cpp:1677
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users of I.
Definition Local.cpp:2052
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
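A sketch (the wrapper is hypothetical): folding an add of two constants through the DataLayout-aware folder, where null means the operands did not fold:

  #include "llvm/Analysis/ConstantFolding.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instruction.h"

  // Fold `add C1, C2` at compile time if possible.
  llvm::Constant *foldAdd(llvm::Constant *C1, llvm::Constant *C2,
                          const llvm::DataLayout &DL) {
    return llvm::ConstantFoldBinaryOpOperands(llvm::Instruction::Add, C1, C2, DL);
  }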
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2429
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:322
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
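A sketch of how these flags are typically queried (the alias-analysis result, call, and location are assumed to come from the caller):

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/MemoryLocation.h"

  // True if the call may write to the given memory location.
  bool mayWriteTo(llvm::AAResults &AA, const llvm::CallBase *CB,
                  const llvm::MemoryLocation &Loc) {
    llvm::ModRefInfo MRI = AA.getModRefInfo(CB, Loc);
    return llvm::isModSet(MRI); // Mod or ModRef.
  }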
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Variant of isSafeToSpeculativelyExecute that does not use information from the instruction's non-constant operands, for use when those operands may be replaced with other values.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined value of C.
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2018
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2145
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address intrinsics from the specified value, returning the original object being addressed.
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:265
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:262
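A sketch tying these together (the helper name is hypothetical, and it assumes the computeKnownBits(V, DL) overload from ValueTracking.h): the leading-zero count of a KnownBits bounds the value's magnitude:

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Value.h"
  #include "llvm/Support/KnownBits.h"
  #include <cassert>

  // True if V is known to fit in its low N bits.
  bool fitsInLowBits(const llvm::Value *V, unsigned N, const llvm::DataLayout &DL) {
    llvm::KnownBits Known = llvm::computeKnownBits(V, DL);
    assert(N <= Known.getBitWidth() && "N exceeds the value's bit width");
    return Known.countMinLeadingZeros() >= Known.getBitWidth() - N;
  }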
struct PassInfoMixin
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const
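A sketch of the common pattern (SQ and I are assumed to come from the caller): refine the query with the instruction's context before handing it to an InstSimplify entry point:

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/Instruction.h"

  // Returns a simplified replacement for I, or null if none is found.
  llvm::Value *trySimplify(llvm::Instruction *I, const llvm::SimplifyQuery &SQ) {
    const llvm::SimplifyQuery Q = SQ.getWithInstruction(I);
    return llvm::simplifyInstruction(I, Q);
  }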