//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <, >, <=, >= to ==, != if
//       possible.
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
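//
// For example (an illustrative sketch), canonicalization 6 rewrites:
//    %Y = mul i32 %X, 8
// into:
//    %Y = shl i32 %X, 3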
//
//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
                                       cl::desc("Enable code sinking"),
                                       cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

namespace llvm {
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
} // end namespace llvm

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}

std::optional<Value *>
InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  // Approved exception for TTI use: This queries a legality property of the
  // target, not a profitability heuristic. Ideally this should be part of
  // DataLayout instead.
  return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
}

Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)
    return llvm::emitGEPOffset(&Builder, DL, GEP);

  IRBuilderBase::InsertPointGuard Guard(Builder);
  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)
    Builder.SetInsertPoint(Inst);

  Value *Offset = EmitGEPOffset(GEP);
  // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
  if (Inst && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
    replaceInstUsesWith(
        *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                                 Offset, "", GEP->getNoWrapFlags()));
    eraseInstFromFunction(*Inst);
  }
  return Offset;
}

Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
                                        GEPNoWrapFlags NW, Type *IdxTy,
                                        bool RewriteGEPs) {
  auto Add = [&](Value *Sum, Value *Offset) -> Value * {
    if (Sum)
      return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
                               NW.isInBounds());
    else
      return Offset;
  };

  Value *Sum = nullptr;
  Value *OneUseSum = nullptr;
  Value *OneUseBase = nullptr;
  GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
  for (GEPOperator *GEP : reverse(GEPs)) {
    Value *Offset;
    {
      // Expand the offset at the point of the previous GEP to enable
      // rewriting. However, use the original insertion point for calculating
      // Sum.
      IRBuilderBase::InsertPointGuard Guard(Builder);
      auto *Inst = dyn_cast<Instruction>(GEP);
      if (RewriteGEPs && Inst)
        Builder.SetInsertPoint(Inst);

      Offset = llvm::emitGEPOffset(&Builder, DL, GEP);
      if (Offset->getType() != IdxTy)
        Offset = Builder.CreateVectorSplat(
            cast<VectorType>(IdxTy)->getElementCount(), Offset);
      if (GEP->hasOneUse()) {
        // Offsets of one-use GEPs will be merged into the next multi-use GEP.
        OneUseSum = Add(OneUseSum, Offset);
        OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
        if (!OneUseBase)
          OneUseBase = GEP->getPointerOperand();
        continue;
      }

      if (OneUseSum)
        Offset = Add(OneUseSum, Offset);

      // Rewrite the GEP to reuse the computed offset. This also includes
      // offsets from preceding one-use GEPs.
      if (RewriteGEPs && Inst &&
          !(GEP->getSourceElementType()->isIntegerTy(8) &&
            GEP->getOperand(1) == Offset)) {
        replaceInstUsesWith(
            *Inst,
            Builder.CreatePtrAdd(
                OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
                OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
        eraseInstFromFunction(*Inst);
      }
    }

    Sum = Add(Sum, Offset);
    OneUseSum = OneUseBase = nullptr;
    OneUseFlags = GEPNoWrapFlags::all();
  }
  if (OneUseSum)
    Sum = Add(Sum, OneUseSum);
  if (!Sum)
    return Constant::getNullValue(IdxTy);
  return Sum;
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
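///
/// For example (an illustrative sketch, assuming a data layout where i8, i16,
/// i32 and i64 are the legal integer widths): i33 -> i32 is allowed (shrink
/// to a desirable width), i32 -> i33 is not (legal to illegal), i160 -> i64
/// is allowed, and i64 -> i160 is not (never grow an illegal type).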
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal or desirable integer from type, and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to
/// 'To'. We don't want to convert from a legal to an illegal type or from a
/// smaller to a larger illegal type. i1 is always treated as a legal type
/// because it is a fundamental type in IR, and there are many specialized
/// optimizations for i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true if no-signed-wrap should be maintained for I.
// The nsw flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add/Sub/Mul opcodes. For
// all other opcodes, the function conservatively returns false.
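//
// For example, when reassociating (add nsw (add nsw X, 5), 7) with B = 5 and
// C = 7, 5 + 7 does not overflow, so nsw may be kept on the result; with
// B = INT_MAX and C = 1 it would have to be dropped.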
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  // We reason about Add/Sub/Mul only.
  bool Overflow = false;
  switch (I.getOpcode()) {
  case Instruction::Add:
    (void)BVal->sadd_ov(*CVal, Overflow);
    break;
  case Instruction::Sub:
    (void)BVal->ssub_ov(*CVal, Overflow);
    break;
  case Instruction::Mul:
    (void)BVal->smul_ov(*CVal, Overflow);
    break;
  default:
    // Conservatively return false for other opcodes.
    return false;
  }
  return !Overflow;
}

static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable, as they can be
/// safely kept across these rewrites.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  auto *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
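///
/// Illustrative sketch of the zext case handled below:
///   (and (zext (and i8 %x, 12) to i32), 10) --> (and (zext i8 %x to i32), 8)
/// because 10 & (zext 12) == 8.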
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp2->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}

// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
/// Commutative operators:
///
/// 1. Order operands such that they are listed from right (least complex) to
///    left (most complex). This puts constants before unary operators before
///    binary operators.
///
/// Associative operators:
///
/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
/// Associative and commutative operators:
///
/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///    if C1 and C2 are constants.
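///
/// For example (an illustrative sketch, assuming the inner adds have no other
/// uses), transform 6 rewrites "(X + 1) + (Y + 2)" into "(X + Y) + 3".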
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        bool IsNUW = hasNoUnsignedWrap(I) &&
                     hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
            BinaryOperator::CreateNUW(Opcode, A, B) :
            BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns identity value for given opcode, which can be used to
/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
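/// For example, in "add (shl X, 5), X" the shl is reported back as
/// "mul X, 32", which lets the caller factor the whole expression to
/// "mul X, 33".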
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantFoldBinaryInstruction(
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
      assert(RHS && "Constant folding of immediate constants failed");
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
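///
/// For instance (an illustrative sketch, subject to the use-count checks in
/// the body):
///   %t = mul i32 %a, %b
///   %u = mul i32 %a, %c
///   %r = add i32 %t, %u
/// factors to:
///   %s = add i32 %b, %c
///   %r = mul i32 %a, %s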
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<BinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}

// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
//    1) (add/sub/disjoint_or C, (ctpop (not x))
//        -> (add/sub/disjoint_or C', (ctpop x))
//    2) (cmp pred C, (ctpop (not x))
//        -> (cmp pred C', (ctpop x))
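//
// For example, on i32 (an illustrative sketch):
//    (icmp ugt (ctpop (not x)), 7) -> (icmp ult (ctpop x), 25)
// using ctpop(not x) == 32 - ctpop(x).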
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
  unsigned Opc = I->getOpcode();
  unsigned ConstIdx = 1;
  switch (Opc) {
  default:
    return nullptr;
  // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x), (ctpop x))
  // We can fold the BitWidth(x) with add/sub/icmp as long as the other operand
  // is constant.
  case Instruction::Sub:
    ConstIdx = 0;
    break;
  case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types.
    // As well, since (ctpop x) is known to lie in [0, BitWidth(x)], almost all
    // signed comparisons against it are simplified to unsigned.
    if (cast<ICmpInst>(I)->isSigned())
      return nullptr;
    break;
  case Instruction::Or:
    if (!match(I, m_DisjointOr(m_Value(), m_Value())))
      return nullptr;
    [[fallthrough]];
  case Instruction::Add:
    break;
  }

  Value *Op;
  // Find ctpop.
  if (!match(I->getOperand(1 - ConstIdx),
             m_Intrinsic<Intrinsic::ctpop>(m_Not(m_Value(Op)))))
    return nullptr;

  Constant *C;
  // Check other operand is ImmConstant.
  if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
    return nullptr;

  Type *Ty = Op->getType();
  Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
  // Need extra check for icmp. Note if this check is true, it generally means
  // the icmp will simplify to true/false.
  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
    Constant *Cmp =
        ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
    if (!Cmp || !Cmp->isNullValue())
      return nullptr;
  }

  // Check we can invert `(not x)` for free.
  bool Consumes = false;
  if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
    return nullptr;
  Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");

  Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);

  Value *R = nullptr;

  // Do the transformation here to avoid potentially introducing an infinite
  // loop.
  switch (Opc) {
  case Instruction::Sub:
    R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
    break;
  case Instruction::Or:
  case Instruction::Add:
    R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
    break;
  case Instruction::ICmp:
    R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
                           CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
    break;
  default:
    llvm_unreachable("Unhandled Opcode");
  }
  assert(R != nullptr);
  return replaceInstUsesWith(*I, R);
}

// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//   IFF
//    1) the logic_shifts match
//    2) either BinOp1 is `and`, or the binop/shift pair distributes
//       completely and either Binop2 is `and` or
//       (logic_shift (inv_logic_shift C1, C), C) == C1
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//   IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
//
//    -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//   IFF
//    1) Binop1 is bitwise logical operator `and`, `or` or `xor`
//    2) Binop2 is `not`
//
//    -> (arithmetic_shift Binop1((not X), Y), Amt)
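//
// For example, the second case with matching `or`s (an illustrative sketch):
//    (or (or (shl X, 3), Mask), (shl Y, 3))
//      -> (or (shl (or X, Y), 3), Mask)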

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to
      // use add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If the BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need complete distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, need mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(I.getOperand(1 - ShOpnum),
               m_OneUse(m_c_BinOp(m_CombineAnd(
                            m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
                            m_Value(ShiftedX)),
                        m_Value(Mask)))))
      return nullptr;
    // Make sure we are matching instruction shifts and not ConstantExpr
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need same shift opcode
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure binop is real instruction and not ConstantExpr
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift irrelevant of constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}

// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension, whose value is the select condition.
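//
// For example (an illustrative sketch):
//    %e = zext i1 %c to i32
//    %s = select i1 %c, i32 %t, i32 %f
//    (add %e, %s) -> select i1 %c, i32 (add 1, %t), i32 (add 0, %f)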
Instruction *InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(
    BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
           A->getType()->getScalarSizeInBits() == 1 &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  SelectInst *SI = ProfcheckDisableMetadataFixes
                       ? nullptr
                       : cast<SelectInst>(CastOp == LHS ? RHS : LHS);

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    Constant *C;

    if (IsTrueArm) {
      C = Constant::getNullValue(V->getType());
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
    } else {
      C = Constant::getAllOnesValue(V->getType());
    }

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
                       : Builder.CreateBinOp(Opc, C, V);
  };

  // If the value used in the zext/sext is the select condition, or the
  // negation of the select condition, the binop can be simplified.
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal), "", nullptr, SI);
  }

  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal), "", nullptr, SI);
  }

  return nullptr;
}

Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)". Try to factorize common
  // term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)". Try to factorize common
  // term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C". See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)". See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}

static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:
    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
  case Instruction::Select: {
    Value *Cond = LHSInst->getOperand(0);
    Value *TrueVal = LHSInst->getOperand(1);
    Value *FalseVal = LHSInst->getOperand(2);
    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
        FalseVal == RHSInst->getOperand(1))
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    // Match min(a, b) and max(a, b)
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  SelectInst *SI = ProfcheckDisableMetadataFixes
                       ? nullptr
                       : cast<SelectInst>(LHSIsSelect ? LHS : RHS);

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
    FMF = FPOp->getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been
    // simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;
    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
  NewSI->takeName(&I);
  return NewSI;
}

/// Freely adapt every user of V as if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br: {
      auto *BI = cast<BranchInst>(U);
      BI->swapSuccessors(); // swaps prof metadata too
      if (BPI)
        BPI->swapSuccEdgesProbabilities(BI->getParent());
      break;
    }
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }

  // Update pre-existing debug value uses.
  SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
  llvm::findDbgValues(I, DbgVariableRecords);

  for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
    SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
    for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
         Idx != End; ++Idx)
      if (DbgVal->getVariableLocationOp(Idx) == I)
        DbgVal->setExpression(
            DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
  }
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is
/// a constant zero (which is the 'negate' form).
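/// For example, (sub i32 0, %x) yields %x, and a foldable constant such as
/// i32 7 yields i32 -7.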
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}
1528
1529// Try to fold:
1530// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1531// -> ({s|u}itofp (int_binop x, y))
1532// 2) (fp_binop ({s|u}itofp x), FpC)
1533// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1534//
1535// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1536Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1537 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1539
1540 Type *FPTy = BO.getType();
1541 Type *IntTy = IntOps[0]->getType();
1542
1543 unsigned IntSz = IntTy->getScalarSizeInBits();
1544 // This is the maximum number of inuse bits by the integer where the int -> fp
1545 // casts are exact.
1546 unsigned MaxRepresentableBits =
1548
1549 // Preserve known number of leading bits. This can allow us to trivial nsw/nuw
1550 // checks later on.
1551 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1552
1553 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1554 // cache if between calls to `foldFBinOpOfIntCastsFromSign`.
1555 auto IsNonZero = [&](unsigned OpNo) -> bool {
1556 if (OpsKnown[OpNo].hasKnownBits() &&
1557 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1558 return true;
1559 return isKnownNonZero(IntOps[OpNo], SQ);
1560 };
1561
1562 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1563 // NB: This matches the impl in ValueTracking, we just try to use cached
1564 // knownbits here. If we ever start supporting WithCache for
1565 // `isKnownNonNegative`, change this to an explicit call.
1566 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1567 };
1568
1569 // Check if we know for certain that ({s|u}itofp op) is exact.
1570 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1571 // Can we treat this operand as the desired sign?
1572 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1573 !IsNonNeg(OpNo))
1574 return false;
1575
1576 // If fp precision >= bitwidth(op) then its exact.
1577 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1578 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1579 // handled specially. We can't, however, increase the bound arbitrarily for
1580 // `sitofp` as for larger sizes, it won't sign extend.
1581 if (MaxRepresentableBits < IntSz) {
1582 // Otherwise if its signed cast check that fp precisions >= bitwidth(op) -
1583 // numSignBits(op).
1584 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1585 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1586 if (OpsFromSigned)
1587 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1588 // Finally for unsigned check that fp precision >= bitwidth(op) -
1589 // numLeadingZeros(op).
1590 else {
1591 NumUsedLeadingBits[OpNo] =
1592 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1593 }
1594 }
1595 // NB: We could also check if op is known to be a power of 2 or zero (which
1596 // will always be representable). Its unlikely, however, that is we are
1597 // unable to bound op in any way we will be able to pass the overflow checks
1598 // later on.
1599
1600 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1601 return false;
1602 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1603 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1604 IsNonZero(OpNo);
1605 };
1606
1607 // If we have a constant rhs, see if we can losslessly convert it to an int.
1608 if (Op1FpC != nullptr) {
1609 // Signed + Mul req non-zero
1610 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1611 !match(Op1FpC, m_NonZeroFP()))
1612 return nullptr;
1613
1615 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1616 IntTy, DL);
1617 if (Op1IntC == nullptr)
1618 return nullptr;
1619 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1620 : Instruction::UIToFP,
1621 Op1IntC, FPTy, DL) != Op1FpC)
1622 return nullptr;
1623
1624 // First try to keep sign of cast the same.
1625 IntOps[1] = Op1IntC;
1626 }
1627
1628 // Ensure lhs/rhs integer types match.
1629 if (IntTy != IntOps[1]->getType())
1630 return nullptr;
1631
1632 if (Op1FpC == nullptr) {
1633 if (!IsValidPromotion(1))
1634 return nullptr;
1635 }
1636 if (!IsValidPromotion(0))
1637 return nullptr;
1638
1639 // Final we check if the integer version of the binop will not overflow.
1641 // Because of the precision check, we can often rule out overflows.
1642 bool NeedsOverflowCheck = true;
1643 // Try to conservatively rule out overflow based on the already done precision
1644 // checks.
1645 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1646 unsigned OverflowMaxCurBits =
1647 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1648 bool OutputSigned = OpsFromSigned;
1649 switch (BO.getOpcode()) {
1650 case Instruction::FAdd:
1651 IntOpc = Instruction::Add;
1652 OverflowMaxOutputBits += OverflowMaxCurBits;
1653 break;
1654 case Instruction::FSub:
1655 IntOpc = Instruction::Sub;
1656 OverflowMaxOutputBits += OverflowMaxCurBits;
1657 break;
1658 case Instruction::FMul:
1659 IntOpc = Instruction::Mul;
1660 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1661 break;
1662 default:
1663 llvm_unreachable("Unsupported binop");
1664 }
1665 // The precision check may have already ruled out overflow.
1666 if (OverflowMaxOutputBits < IntSz) {
1667 NeedsOverflowCheck = false;
1668 // We can bound unsigned overflow from sub to in range signed value (this is
1669 // what allows us to avoid the overflow check for sub).
1670 if (IntOpc == Instruction::Sub)
1671 OutputSigned = true;
1672 }
1673
1674 // Precision check did not rule out overflow, so need to check.
1675 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1676 // `IntOps[...]` arguments to `KnownOps[...]`.
1677 if (NeedsOverflowCheck &&
1678 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1679 return nullptr;
1680
1681 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1682 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1683 IntBO->setHasNoSignedWrap(OutputSigned);
1684 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1685 }
1686 if (OutputSigned)
1687 return new SIToFPInst(IntBinOp, FPTy);
1688 return new UIToFPInst(IntBinOp, FPTy);
1689}
1690
1691// Try to fold:
1692// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1693// -> ({s|u}itofp (int_binop x, y))
1694// 2) (fp_binop ({s|u}itofp x), FpC)
1695// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1696Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1697 // Don't perform the fold on vectors, as the integer operation may be much
1698 // more expensive than the float operation in that case.
1699 if (BO.getType()->isVectorTy())
1700 return nullptr;
1701
1702 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1703 Constant *Op1FpC = nullptr;
1704 // Check for:
1705 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1706 // 2) (binop ({s|u}itofp x), FpC)
1707 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1708 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1709 return nullptr;
1710
1711 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1712 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1713 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1714 return nullptr;
1715
1716 // Cache KnownBits a bit to potentially save some analysis.
1717 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1718
1719 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1720 // different constraints depending on the sign of the cast.
1721 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1722 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1723 IntOps, Op1FpC, OpsKnown))
1724 return R;
1725 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1726 Op1FpC, OpsKnown);
1727}
1728
1729/// A binop with a constant operand and a sign-extended boolean operand may be
1730/// converted into a select of constants by applying the binary operation to
1731/// the constant with the two possible values of the extended boolean (0 or -1).
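/// For example (illustrative):
///   %s = sext i1 %b to i32
///   %r = add i32 %s, 42
/// -->
///   %r = select i1 %b, i32 41, i32 42   ; -1 + 42 = 41, 0 + 42 = 42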
1732Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1733 // TODO: Handle non-commutative binop (constant is operand 0).
1734 // TODO: Handle zext.
1735 // TODO: Peek through 'not' of cast.
1736 Value *BO0 = BO.getOperand(0);
1737 Value *BO1 = BO.getOperand(1);
1738 Value *X;
1739 Constant *C;
1740 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1741 !X->getType()->isIntOrIntVectorTy(1))
1742 return nullptr;
1743
1744 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1745 Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
1746 Constant *Zero = ConstantInt::getNullValue(BO.getType());
1747 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1748 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1749 return createSelectInstWithUnknownProfile(X, TVal, FVal);
1750}
1751
1752 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1753 bool IsTrueArm) {
1754 SmallVector<Value *> Ops;
1755 for (Value *Op : I.operands()) {
1756 Value *V = nullptr;
1757 if (Op == SI) {
1758 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1759 } else if (match(SI->getCondition(),
1760 m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1761 : ICmpInst::ICMP_NE,
1762 m_Specific(Op), m_Value(V))) &&
1763 isGuaranteedNotToBeUndefOrPoison(V)) {
1764 // Pass
1765 } else if (match(Op, m_ZExt(m_Specific(SI->getCondition())))) {
1766 V = IsTrueArm ? ConstantInt::get(Op->getType(), 1)
1767 : ConstantInt::getNullValue(Op->getType());
1768 } else {
1769 V = Op;
1770 }
1771 Ops.push_back(V);
1772 }
1773
1774 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1775}
1776
1777 static Instruction *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1778 Value *NewOp, InstCombiner &IC) {
1779 Instruction *Clone = I.clone();
1780 Clone->replaceUsesOfWith(SI, NewOp);
1781 Clone->dropUBImplyingAttrsAndMetadata();
1782 IC.InsertNewInstBefore(Clone, I.getIterator());
1783 return Clone;
1784}
1785
1786 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1787 bool FoldWithMultiUse,
1788 bool SimplifyBothArms) {
1789 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1790 if (!SI->hasOneUser() && !FoldWithMultiUse)
1791 return nullptr;
1792
1793 Value *TV = SI->getTrueValue();
1794 Value *FV = SI->getFalseValue();
1795
1796 // Bool selects with constant operands can be folded to logical ops.
1797 if (SI->getType()->isIntOrIntVectorTy(1))
1798 return nullptr;
1799
1800 // Avoid breaking min/max reduction pattern,
1801 // which is necessary for vectorization later.
1802 if (isa<MinMaxIntrinsic>(&Op))
1803 for (Value *IntrinOp : Op.operands())
1804 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1805 for (Value *PhiOp : PN->operands())
1806 if (PhiOp == &Op)
1807 return nullptr;
1808
1809 // Test if a FCmpInst instruction is used exclusively by a select as
1810 // part of a minimum or maximum operation. If so, refrain from doing
1811 // any other folding. This helps out other analyses which understand
1812 // non-obfuscated minimum and maximum idioms. And in this case, at
1813 // least one of the comparison operands has at least one user besides
1814 // the compare (the select), which would often largely negate the
1815 // benefit of folding anyway.
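// E.g. (illustrative): in
//   %cmp = fcmp olt float %a, %b
//   %min = select i1 %cmp, float %a, float %b
// the compare and select together form a canonical fmin idiom that later
// analyses recognize, so it is left untouched here.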
1816 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1817 if (CI->hasOneUse()) {
1818 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1819 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1820 !CI->isCommutative())
1821 return nullptr;
1822 }
1823 }
1824
1825 // Make sure that one of the select arms folds successfully.
1826 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1827 Value *NewFV =
1828 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1829 if (!NewTV && !NewFV)
1830 return nullptr;
1831
1832 if (SimplifyBothArms && !(NewTV && NewFV))
1833 return nullptr;
1834
1835 // Create an instruction for the arm that did not fold.
1836 if (!NewTV)
1837 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1838 if (!NewFV)
1839 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1840 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1841}
1842
1843 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1844 Value *InValue, BasicBlock *InBB,
1845 const DataLayout &DL,
1846 const SimplifyQuery SQ) {
1847 // NB: It is a precondition of this transform that the operands be
1848 // phi translatable!
1849 SmallVector<Value *> Ops;
1850 for (Value *Op : I.operands()) {
1851 if (Op == PN)
1852 Ops.push_back(InValue);
1853 else
1854 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1855 }
1856
1857 // Don't consider the simplification successful if we get back a constant
1858 // expression. That's just an instruction in hiding.
1859 // Also reject the case where we simplify back to the phi node. We wouldn't
1860 // be able to remove it in that case.
1861 Value *NewVal = simplifyInstructionWithOperands(
1862 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1863 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1864 return NewVal;
1865
1866 // Check if incoming PHI value can be replaced with constant
1867 // based on implied condition.
1868 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1869 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1870 if (TerminatorBI && TerminatorBI->isConditional() &&
1871 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1872 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1873 std::optional<bool> ImpliedCond = isImpliedCondition(
1874 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1875 DL, LHSIsTrue);
1876 if (ImpliedCond)
1877 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1878 }
1879
1880 return nullptr;
1881}
1882
1883/// In some cases it is beneficial to fold a select into a binary operator.
1884/// For example:
1885/// %1 = or %in, 4
1886/// %2 = select %cond, %1, %in
1887/// %3 = or %2, 1
1888/// =>
1889/// %1 = select i1 %cond, 5, 1
1890/// %2 = or %1, %in
1892 assert(Op.isAssociative() && "The operation must be associative!");
1893
1894 SelectInst *SI = dyn_cast<SelectInst>(Op.getOperand(0));
1895
1896 Constant *Const;
1897 if (!SI || !match(Op.getOperand(1), m_ImmConstant(Const)) ||
1898 !Op.hasOneUse() || !SI->hasOneUse())
1899 return nullptr;
1900
1901 Value *TV = SI->getTrueValue();
1902 Value *FV = SI->getFalseValue();
1903 Value *Input, *NewTV, *NewFV;
1904 Constant *Const2;
1905
1906 if (TV->hasOneUse() && match(TV, m_BinOp(Op.getOpcode(), m_Specific(FV),
1907 m_ImmConstant(Const2)))) {
1908 NewTV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1909 NewFV = Const;
1910 Input = FV;
1911 } else if (FV->hasOneUse() &&
1912 match(FV, m_BinOp(Op.getOpcode(), m_Specific(TV),
1913 m_ImmConstant(Const2)))) {
1914 NewTV = Const;
1915 NewFV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1916 Input = TV;
1917 } else
1918 return nullptr;
1919
1920 if (!NewTV || !NewFV)
1921 return nullptr;
1922
1923 Value *NewSI =
1924 Builder.CreateSelect(SI->getCondition(), NewTV, NewFV, "",
1925 ProfcheckDisableMetadataFixes ? nullptr : SI);
1926 return BinaryOperator::Create(Op.getOpcode(), NewSI, Input);
1927}
1928
1929 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1930 bool AllowMultipleUses) {
1931 unsigned NumPHIValues = PN->getNumIncomingValues();
1932 if (NumPHIValues == 0)
1933 return nullptr;
1934
1935 // We normally only transform phis with a single use. However, if a PHI has
1936 // multiple uses and they are all the same operation, we can fold *all* of the
1937 // uses into the PHI.
1938 bool OneUse = PN->hasOneUse();
1939 bool IdenticalUsers = false;
1940 if (!AllowMultipleUses && !OneUse) {
1941 // Walk the use list for the instruction, comparing them to I.
1942 for (User *U : PN->users()) {
1943 auto *UI = cast<Instruction>(U);
1944 if (UI != &I && !I.isIdenticalTo(UI))
1945 return nullptr;
1946 }
1947 // Otherwise, we can replace *all* users with the new PHI we form.
1948 IdenticalUsers = true;
1949 }
1950
1951 // Check that all operands are phi-translatable.
1952 for (Value *Op : I.operands()) {
1953 if (Op == PN)
1954 continue;
1955
1956 // Non-instructions never require phi-translation.
1957 auto *I = dyn_cast<Instruction>(Op);
1958 if (!I)
1959 continue;
1960
1961 // Phi-translate can handle phi nodes in the same block.
1962 if (isa<PHINode>(I))
1963 if (I->getParent() == PN->getParent())
1964 continue;
1965
1966 // Operand dominates the block, no phi-translation necessary.
1967 if (DT.dominates(I, PN->getParent()))
1968 continue;
1969
1970 // Not phi-translatable, bail out.
1971 return nullptr;
1972 }
1973
1974 // Check to see whether the instruction can be folded into each phi operand.
1975 // If there is one operand that does not fold, remember the BB it is in.
1976 SmallVector<Value *> NewPhiValues;
1977 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1978 bool SeenNonSimplifiedInVal = false;
1979 for (unsigned i = 0; i != NumPHIValues; ++i) {
1980 Value *InVal = PN->getIncomingValue(i);
1981 BasicBlock *InBB = PN->getIncomingBlock(i);
1982
1983 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1984 NewPhiValues.push_back(NewVal);
1985 continue;
1986 }
1987
1988 // Handle some cases that can't be fully simplified, but where we know that
1989 // the two instructions will fold into one.
1990 auto WillFold = [&]() {
1991 if (!InVal->hasUseList() || !InVal->hasOneUser())
1992 return false;
1993
1994 // icmp of ucmp/scmp with constant will fold to icmp.
1995 const APInt *Ignored;
1996 if (isa<CmpIntrinsic>(InVal) &&
1997 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1998 return true;
1999
2000 // icmp eq zext(bool), 0 will fold to !bool.
2001 if (isa<ZExtInst>(InVal) &&
2002 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
2003 match(&I,
2005 return true;
2006
2007 return false;
2008 };
2009
2010 if (WillFold()) {
2011 OpsToMoveUseToIncomingBB.push_back(i);
2012 NewPhiValues.push_back(nullptr);
2013 continue;
2014 }
2015
2016 if (!OneUse && !IdenticalUsers)
2017 return nullptr;
2018
2019 if (SeenNonSimplifiedInVal)
2020 return nullptr; // More than one non-simplified value.
2021 SeenNonSimplifiedInVal = true;
2022
2023 // If there is exactly one non-simplified value, we can insert a copy of the
2024 // operation in that block. However, if this is a critical edge, we would
2025 // be inserting the computation on some other paths (e.g. inside a loop).
2026 // Only do this if the pred block is unconditionally branching into the phi
2027 // block. Also, make sure that the pred block is not dead code.
2028 auto *BI = dyn_cast<BranchInst>(InBB->getTerminator());
2029 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
2030 return nullptr;
2031
2032 NewPhiValues.push_back(nullptr);
2033 OpsToMoveUseToIncomingBB.push_back(i);
2034
2035 // Do not push the operation across a loop backedge. This could result in
2036 // an infinite combine loop, and is generally non-profitable (especially
2037 // if the operation was originally outside the loop).
2038 if (isBackEdge(InBB, PN->getParent()))
2039 return nullptr;
2040 }
2041
2042 // Clone the instruction that uses the phi node and move it into the incoming
2043 // BB because we know that the next iteration of InstCombine will simplify it.
2044 SmallDenseMap<BasicBlock *, Instruction *> Clones;
2045 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
2046 Value *Op = PN->getIncomingValue(OpIndex);
2047 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
2048
2049 Instruction *Clone = Clones.lookup(OpBB);
2050 if (!Clone) {
2051 Clone = I.clone();
2052 for (Use &U : Clone->operands()) {
2053 if (U == PN)
2054 U = Op;
2055 else
2056 U = U->DoPHITranslation(PN->getParent(), OpBB);
2057 }
2058 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2059 Clones.insert({OpBB, Clone});
2060 // We may have speculated the instruction.
2061 Clone->dropUBImplyingAttrsAndMetadata();
2062 }
2063
2064 NewPhiValues[OpIndex] = Clone;
2065 }
2066
2067 // Okay, we can do the transformation: create the new PHI node.
2068 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2069 InsertNewInstBefore(NewPN, PN->getIterator());
2070 NewPN->takeName(PN);
2071 NewPN->setDebugLoc(PN->getDebugLoc());
2072
2073 for (unsigned i = 0; i != NumPHIValues; ++i)
2074 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2075
2076 if (IdenticalUsers) {
2077 // Collect and deduplicate users up-front to avoid iterator invalidation.
2078 SmallSetVector<Instruction *, 4> ToReplace;
2079 for (User *U : PN->users()) {
2080 Instruction *User = cast<Instruction>(U);
2081 if (User == &I)
2082 continue;
2083 ToReplace.insert(User);
2084 }
2085 for (Instruction *I : ToReplace) {
2086 replaceInstUsesWith(*I, NewPN);
2087 eraseInstFromFunction(*I);
2088 }
2089 OneUse = true;
2090 }
2091
2092 if (OneUse) {
2093 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2094 }
2095 return replaceInstUsesWith(I, NewPN);
2096}
2097
2098 Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2099 if (!BO.isAssociative())
2100 return nullptr;
2101
2102 // Find the interleaved binary ops.
2103 auto Opc = BO.getOpcode();
2104 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2105 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2106 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2107 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2108 !BO0->isAssociative() || !BO1->isAssociative() ||
2109 BO0->getParent() != BO1->getParent())
2110 return nullptr;
2111
2112 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2113 "Expected commutative instructions!");
2114
2115 // Find the matching phis, forming the recurrences.
2116 PHINode *PN0, *PN1;
2117 Value *Start0, *Step0, *Start1, *Step1;
2118 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2119 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2120 PN0->getParent() != PN1->getParent())
2121 return nullptr;
2122
2123 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2124 "Expected PHIs with two incoming values!");
2125
2126 // Convert the start and step values to constants.
2127 auto *Init0 = dyn_cast<Constant>(Start0);
2128 auto *Init1 = dyn_cast<Constant>(Start1);
2129 auto *C0 = dyn_cast<Constant>(Step0);
2130 auto *C1 = dyn_cast<Constant>(Step1);
2131 if (!Init0 || !Init1 || !C0 || !C1)
2132 return nullptr;
2133
2134 // Fold the recurrence constants.
2135 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2136 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2137 if (!Init || !C)
2138 return nullptr;
2139
2140 // Create the reduced PHI.
2141 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2142 "reduced.phi");
2143
2144 // Create the new binary op.
2145 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2146 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2147 // Intersect FMF flags for FADD and FMUL.
2148 FastMathFlags Intersect = BO0->getFastMathFlags() &
2149 BO1->getFastMathFlags() & BO.getFastMathFlags();
2150 NewBO->setFastMathFlags(Intersect);
2151 } else {
2152 OverflowTracking Flags;
2153 Flags.AllKnownNonNegative = false;
2154 Flags.AllKnownNonZero = false;
2155 Flags.mergeFlags(*BO0);
2156 Flags.mergeFlags(*BO1);
2157 Flags.mergeFlags(BO);
2158 Flags.applyFlags(*NewBO);
2159 }
2160 NewBO->takeName(&BO);
2161
2162 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2163 auto *V = PN0->getIncomingValue(I);
2164 auto *BB = PN0->getIncomingBlock(I);
2165 if (V == Init0) {
2166 assert(((PN1->getIncomingValue(0) == Init1 &&
2167 PN1->getIncomingBlock(0) == BB) ||
2168 (PN1->getIncomingValue(1) == Init1 &&
2169 PN1->getIncomingBlock(1) == BB)) &&
2170 "Invalid incoming block!");
2171 NewPN->addIncoming(Init, BB);
2172 } else if (V == BO0) {
2173 assert(((PN1->getIncomingValue(0) == BO1 &&
2174 PN1->getIncomingBlock(0) == BB) ||
2175 (PN1->getIncomingValue(1) == BO1 &&
2176 PN1->getIncomingBlock(1) == BB)) &&
2177 "Invalid incoming block!");
2178 NewPN->addIncoming(NewBO, BB);
2179 } else
2180 llvm_unreachable("Unexpected incoming value!");
2181 }
2182
2183 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2184 << "\n with " << *PN1 << "\n " << *BO1
2185 << '\n');
2186
2187 // Insert the new recurrence and remove the old (dead) ones.
2188 InsertNewInstWith(NewPN, PN0->getIterator());
2189 InsertNewInstWith(NewBO, BO0->getIterator());
2190
2197
2198 return replaceInstUsesWith(BO, NewBO);
2199}
2200
2201 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2202 // Attempt to fold binary operators whose operands are simple recurrences.
2203 if (auto *NewBO = foldBinopWithRecurrence(BO))
2204 return NewBO;
2205
2206 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2207 // we are guarding against replicating the binop in >1 predecessor.
2208 // This could miss matching a phi with 2 constant incoming values.
2209 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2210 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2211 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2212 Phi0->getNumOperands() != Phi1->getNumOperands())
2213 return nullptr;
2214
2215 // TODO: Remove the restriction for binop being in the same block as the phis.
2216 if (BO.getParent() != Phi0->getParent() ||
2217 BO.getParent() != Phi1->getParent())
2218 return nullptr;
2219
2220 // Fold if phi0 or phi1 has a constant incoming value (from the same block)
2221 // that acts as an identity for the binary operator, so the binop can be
2222 // replaced by a phi of the other incoming values.
2223 // For example:
2224 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2225 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2226 // %add = add i32 %phi0, %phi1
2227 // ==>
2228 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2229 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2230 /*AllowRHSConstant*/ false);
2231 if (C) {
2232 SmallVector<Value *, 4> NewIncomingValues;
2233 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2234 auto &Phi0Use = std::get<0>(T);
2235 auto &Phi1Use = std::get<1>(T);
2236 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2237 return false;
2238 Value *Phi0UseV = Phi0Use.get();
2239 Value *Phi1UseV = Phi1Use.get();
2240 if (Phi0UseV == C)
2241 NewIncomingValues.push_back(Phi1UseV);
2242 else if (Phi1UseV == C)
2243 NewIncomingValues.push_back(Phi0UseV);
2244 else
2245 return false;
2246 return true;
2247 };
2248
2249 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2250 CanFoldIncomingValuePair)) {
2251 PHINode *NewPhi =
2252 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2253 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2254 "The number of collected incoming values should equal the number "
2255 "of the original PHINode operands!");
2256 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2257 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2258 return NewPhi;
2259 }
2260 }
2261
2262 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2263 return nullptr;
2264
2265 // Match a pair of incoming constants for one of the predecessor blocks.
2266 BasicBlock *ConstBB, *OtherBB;
2267 Constant *C0, *C1;
2268 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2269 ConstBB = Phi0->getIncomingBlock(0);
2270 OtherBB = Phi0->getIncomingBlock(1);
2271 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2272 ConstBB = Phi0->getIncomingBlock(1);
2273 OtherBB = Phi0->getIncomingBlock(0);
2274 } else {
2275 return nullptr;
2276 }
2277 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2278 return nullptr;
2279
2280 // The block that we are hoisting to must reach here unconditionally.
2281 // Otherwise, we could be speculatively executing an expensive or
2282 // non-speculatable op.
2283 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2284 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2285 !DT.isReachableFromEntry(OtherBB))
2286 return nullptr;
2287
2288 // TODO: This check could be tightened to only apply to binops (div/rem) that
2289 // are not safe to speculatively execute. But that could allow hoisting
2290 // potentially expensive instructions (fdiv for example).
2291 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2292 if (BBIter->mayHaveSideEffects())
2293 return nullptr;
2294
2295 // Fold constants for the predecessor block with constant incoming values.
2296 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2297 if (!NewC)
2298 return nullptr;
2299
2300 // Make a new binop in the predecessor block with the non-constant incoming
2301 // values.
2302 Builder.SetInsertPoint(PredBlockBranch);
2303 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2304 Phi0->getIncomingValueForBlock(OtherBB),
2305 Phi1->getIncomingValueForBlock(OtherBB));
2306 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2307 NotFoldedNewBO->copyIRFlags(&BO);
2308
2309 // Replace the binop with a phi of the new values. The old phis are dead.
2310 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2311 NewPhi->addIncoming(NewBO, OtherBB);
2312 NewPhi->addIncoming(NewC, ConstBB);
2313 return NewPhi;
2314}
2315
2316 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2317 bool IsOtherParamConst = isa<Constant>(I.getOperand(1));
2318
2319 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2320 if (Instruction *NewSel =
2321 FoldOpIntoSelect(I, Sel, false, !IsOtherParamConst))
2322 return NewSel;
2323 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2324 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2325 return NewPhi;
2326 }
2327 return nullptr;
2328}
2329
2330 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2331 // If this GEP has only 0 indices, it is the same pointer as
2332 // Src. If Src is not a trivial GEP too, don't combine
2333 // the indices.
2334 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2335 !Src.hasOneUse())
2336 return false;
2337 return true;
2338}
2339
2340/// Find a constant NewC that has property:
2341/// shuffle(NewC, ShMask) = C
2342/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2343///
2344/// A 1-to-1 mapping is not required. Example:
2345/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2346 static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2347 VectorType *NewCTy) {
2348 if (isa<ScalableVectorType>(NewCTy)) {
2349 Constant *Splat = C->getSplatValue();
2350 if (!Splat)
2351 return nullptr;
2352 return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2353 }
2354
2355 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2356 cast<FixedVectorType>(C->getType())->getNumElements())
2357 return nullptr;
2358
2359 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2360 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2361 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2362 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2363 for (unsigned I = 0; I < NumElts; ++I) {
2364 Constant *CElt = C->getAggregateElement(I);
2365 if (ShMask[I] >= 0) {
2366 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2367 Constant *NewCElt = NewVecC[ShMask[I]];
2368 // Bail out if:
2369 // 1. The constant vector contains a constant expression.
2370 // 2. The shuffle needs an element of the constant vector that can't
2371 // be mapped to a new constant vector.
2372 // 3. This is a widening shuffle that copies elements of V1 into the
2373 // extended elements (extending with poison is allowed).
2374 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2375 I >= NewCNumElts)
2376 return nullptr;
2377 NewVecC[ShMask[I]] = CElt;
2378 }
2379 }
2380 return ConstantVector::get(NewVecC);
2381}
2382
2383// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
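// E.g. (illustrative): for Opcode = add, Vector = <4 x i32> <1, 2, 3, 4>
// and Splat = 10, the result is <4 x i32> <11, 12, 13, 14>.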
2384 static Constant *constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector,
2385 Constant *Splat, bool SplatLHS,
2386 const DataLayout &DL) {
2387 ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
2388 Constant *LHS = ConstantVector::getSplat(EC, Splat);
2389 Constant *RHS = Vector;
2390 if (!SplatLHS)
2391 std::swap(LHS, RHS);
2392 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
2393}
2394
2395template <Intrinsic::ID SpliceID>
2397 InstCombiner::BuilderTy &Builder) {
2398 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2399 auto CreateBinOpSplice = [&](Value *X, Value *Y, Value *Offset) {
2400 Value *V = Builder.CreateBinOp(Inst.getOpcode(), X, Y, Inst.getName());
2401 if (auto *BO = dyn_cast<BinaryOperator>(V))
2402 BO->copyIRFlags(&Inst);
2403 Module *M = Inst.getModule();
2404 Function *F = Intrinsic::getOrInsertDeclaration(M, SpliceID, V->getType());
2405 return CallInst::Create(F, {V, PoisonValue::get(V->getType()), Offset});
2406 };
2407 Value *V1, *V2, *Offset;
2408 if (match(LHS,
2409 m_Intrinsic<SpliceID>(m_Value(V1), m_Poison(), m_Value(Offset)))) {
2410 // Op(splice(V1, poison, offset), splice(V2, poison, offset))
2411 // -> splice(Op(V1, V2), poison, offset)
2412 if (match(RHS, m_Intrinsic<SpliceID>(m_Value(V2), m_Poison(),
2413 m_Specific(Offset))) &&
2414 (LHS->hasOneUse() || RHS->hasOneUse() ||
2415 (LHS == RHS && LHS->hasNUses(2))))
2416 return CreateBinOpSplice(V1, V2, Offset);
2417
2418 // Op(splice(V1, poison, offset), RHSSplat)
2419 // -> splice(Op(V1, RHSSplat), poison, offset)
2420 if (LHS->hasOneUse() && isSplatValue(RHS))
2421 return CreateBinOpSplice(V1, RHS, Offset);
2422 }
2423 // Op(LHSSplat, splice(V2, poison, offset))
2424 // -> splice(Op(LHSSplat, V2), poison, offset)
2425 else if (isSplatValue(LHS) &&
2426 match(RHS, m_OneUse(m_Intrinsic<SpliceID>(m_Value(V2), m_Poison(),
2427 m_Value(Offset)))))
2428 return CreateBinOpSplice(LHS, V2, Offset);
2429
2430 // TODO: Fold binops of the form
2431 // Op(splice(poison, V1, offset), splice(poison, V2, offset))
2432 // -> splice(poison, Op(V1, V2), offset)
2433
2434 return nullptr;
2435}
2436
2438 if (!isa<VectorType>(Inst.getType()))
2439 return nullptr;
2440
2441 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2442 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2443 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2444 cast<VectorType>(Inst.getType())->getElementCount());
2445 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2446 cast<VectorType>(Inst.getType())->getElementCount());
2447
2448 auto foldConstantsThroughSubVectorInsertSplat =
2449 [&](Value *MaybeSubVector, Value *MaybeSplat,
2450 bool SplatLHS) -> Instruction * {
2451 Value *Idx;
2452 Constant *Splat, *SubVector, *Dest;
2453 if (!match(MaybeSplat, m_ConstantSplat(m_Constant(Splat))) ||
2454 !match(MaybeSubVector,
2455 m_VectorInsert(m_Constant(Dest), m_Constant(SubVector),
2456 m_Value(Idx))))
2457 return nullptr;
2458 SubVector =
2459 constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
2460 Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
2461 if (!SubVector || !Dest)
2462 return nullptr;
2463 auto *InsertVector =
2464 Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, Idx);
2465 return replaceInstUsesWith(Inst, InsertVector);
2466 };
2467
2468 // If one operand is a constant splat and the other operand is a
2469 // `vector.insert` where both the destination and subvector are constant,
2470 // apply the operation to both the destination and subvector, returning a new
2471 // constant `vector.insert`. This helps constant folding for scalable vectors.
2472 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2473 /*MaybeSubVector=*/LHS, /*MaybeSplat=*/RHS, /*SplatLHS=*/false))
2474 return Folded;
2475 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2476 /*MaybeSubVector=*/RHS, /*MaybeSplat=*/LHS, /*SplatLHS=*/true))
2477 return Folded;
2478
2479 // If both operands of the binop are vector concatenations, then perform the
2480 // narrow binop on each pair of the source operands followed by concatenation
2481 // of the results.
2482 Value *L0, *L1, *R0, *R1;
2483 ArrayRef<int> Mask;
2484 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2485 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2486 LHS->hasOneUse() && RHS->hasOneUse() &&
2487 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2488 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2489 // This transform does not have the speculative execution constraint as
2490 // below because the shuffle is a concatenation. The new binops are
2491 // operating on exactly the same elements as the existing binop.
2492 // TODO: We could ease the mask requirement to allow different undef lanes,
2493 // but that requires an analysis of the binop-with-undef output value.
2494 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2495 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2496 BO->copyIRFlags(&Inst);
2497 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2498 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2499 BO->copyIRFlags(&Inst);
2500 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2501 }
2502
2503 auto createBinOpReverse = [&](Value *X, Value *Y) {
2504 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2505 if (auto *BO = dyn_cast<BinaryOperator>(V))
2506 BO->copyIRFlags(&Inst);
2507 Module *M = Inst.getModule();
2508 Function *F = Intrinsic::getOrInsertDeclaration(
2509 M, Intrinsic::vector_reverse, V->getType());
2510 return CallInst::Create(F, V);
2511 };
2512
2513 // NOTE: Reverse shuffles don't require the speculative execution protection
2514 // below because they don't affect which lanes take part in the computation.
2515
2516 Value *V1, *V2;
2517 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2518 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2519 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2520 (LHS->hasOneUse() || RHS->hasOneUse() ||
2521 (LHS == RHS && LHS->hasNUses(2))))
2522 return createBinOpReverse(V1, V2);
2523
2524 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2525 if (LHS->hasOneUse() && isSplatValue(RHS))
2526 return createBinOpReverse(V1, RHS);
2527 }
2528 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2529 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2530 return createBinOpReverse(LHS, V2);
2531
2532 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2533 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2534 if (auto *BO = dyn_cast<BinaryOperator>(V))
2535 BO->copyIRFlags(&Inst);
2536
2537 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2538 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2539 Module *M = Inst.getModule();
2540 Function *F = Intrinsic::getOrInsertDeclaration(
2541 M, Intrinsic::experimental_vp_reverse, V->getType());
2542 return CallInst::Create(F, {V, AllTrueMask, EVL});
2543 };
2544
2545 Value *EVL;
2546 if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2547 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2548 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2549 if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2550 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2551 (LHS->hasOneUse() || RHS->hasOneUse() ||
2552 (LHS == RHS && LHS->hasNUses(2))))
2553 return createBinOpVPReverse(V1, V2, EVL);
2554
2555 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2556 if (LHS->hasOneUse() && isSplatValue(RHS))
2557 return createBinOpVPReverse(V1, RHS, EVL);
2558 }
2559 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2560 else if (isSplatValue(LHS) &&
2561 match(RHS, m_OneUse(m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2562 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2563 return createBinOpVPReverse(LHS, V2, EVL);
2564
2565 if (Instruction *Folded =
2567 return Folded;
2568 if (Instruction *Folded =
2570 return Folded;
2571
2572 // It may not be safe to reorder shuffles and things like div, urem, etc.
2573 // because we may trap when executing those ops on unknown vector elements.
2574 // See PR20059.
2575 if (!isSafeToSpeculativelyExecute(&Inst))
2576 return nullptr;
2577
2578 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2579 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2580 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2581 BO->copyIRFlags(&Inst);
2582 return new ShuffleVectorInst(XY, M);
2583 };
2584
2585 // If both arguments of the binary operation are shuffles that use the same
2586 // mask and shuffle within a single vector, move the shuffle after the binop.
2587 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2588 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2589 V1->getType() == V2->getType() &&
2590 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2591 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2592 return createBinOpShuffle(V1, V2, Mask);
2593 }
2594
2595 // If both arguments of a commutative binop are select-shuffles that use the
2596 // same mask with commuted operands, the shuffles are unnecessary.
2597 if (Inst.isCommutative() &&
2598 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2599 match(RHS,
2600 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2601 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2602 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2603 // TODO: Allow shuffles that contain undefs in the mask?
2604 // That is legal, but it reduces undef knowledge.
2605 // TODO: Allow arbitrary shuffles by shuffling after binop?
2606 // That might be legal, but we have to deal with poison.
2607 if (LShuf->isSelect() &&
2608 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2609 RShuf->isSelect() &&
2610 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2611 // Example:
2612 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2613 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2614 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2615 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2616 NewBO->copyIRFlags(&Inst);
2617 return NewBO;
2618 }
2619 }
2620
2621 // If one argument is a shuffle within one vector and the other is a constant,
2622 // try moving the shuffle after the binary operation. This canonicalization
2623 // intends to move shuffles closer to other shuffles and binops closer to
2624 // other binops, so they can be folded. It may also enable demanded elements
2625 // transforms.
2626 Constant *C;
2627 if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2628 m_Mask(Mask))),
2629 m_ImmConstant(C)))) {
2630 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2631 "Shuffle should not change scalar type");
2632
2633 bool ConstOp1 = isa<Constant>(RHS);
2634 if (Constant *NewC =
2635 unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2636 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2637 // which will cause UB for div/rem. Mask them with a safe constant.
2638 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2639 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2640
2641 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2642 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2643 Value *NewLHS = ConstOp1 ? V1 : NewC;
2644 Value *NewRHS = ConstOp1 ? NewC : V1;
2645 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2646 }
2647 }
2648
2649 // Try to reassociate to sink a splat shuffle after a binary operation.
2650 if (Inst.isAssociative() && Inst.isCommutative()) {
2651 // Canonicalize shuffle operand as LHS.
2652 if (isa<ShuffleVectorInst>(RHS))
2653 std::swap(LHS, RHS);
2654
2655 Value *X;
2656 ArrayRef<int> MaskC;
2657 int SplatIndex;
2658 Value *Y, *OtherOp;
2659 if (!match(LHS,
2660 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2661 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2662 X->getType() != Inst.getType() ||
2663 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2664 return nullptr;
2665
2666 // FIXME: This may not be safe if the analysis allows undef elements. By
2667 // moving 'Y' before the splat shuffle, we are implicitly assuming
2668 // that it is not undef/poison at the splat index.
2669 if (isSplatValue(OtherOp, SplatIndex)) {
2670 std::swap(Y, OtherOp);
2671 } else if (!isSplatValue(Y, SplatIndex)) {
2672 return nullptr;
2673 }
2674
2675 // X and Y are splatted values, so perform the binary operation on those
2676 // values followed by a splat followed by the 2nd binary operation:
2677 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2678 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2679 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2680 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2681 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2682
2683 // Intersect FMF on both new binops. Other (poison-generating) flags are
2684 // dropped to be safe.
2685 if (isa<FPMathOperator>(R)) {
2686 R->copyFastMathFlags(&Inst);
2687 R->andIRFlags(RHS);
2688 }
2689 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2690 NewInstBO->copyIRFlags(R);
2691 return R;
2692 }
2693
2694 return nullptr;
2695}
2696
2697 /// Try to narrow the width of a binop if at least 1 operand is an extend
2698 /// of a value. This requires a potentially expensive known bits check to make
2699/// sure the narrow op does not overflow.
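/// For example (illustrative, when known bits prove the i16 add cannot
/// overflow):
///   %xw = zext i16 %x to i32
///   %yw = zext i16 %y to i32
///   %r  = add i32 %xw, %yw
/// -->
///   %n = add nuw i16 %x, %y
///   %r = zext i16 %n to i32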
2700Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2701 // We need at least one extended operand.
2702 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2703
2704 // If this is a sub, we swap the operands since we always want an extension
2705 // on the RHS. The LHS can be an extension or a constant.
2706 if (BO.getOpcode() == Instruction::Sub)
2707 std::swap(Op0, Op1);
2708
2709 Value *X;
2710 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2711 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2712 return nullptr;
2713
2714 // If both operands are the same extension from the same source type and we
2715 // can eliminate at least one (hasOneUse), this might work.
2716 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2717 Value *Y;
2718 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2719 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2720 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2721 // If that did not match, see if we have a suitable constant operand.
2722 // Truncating and extending must produce the same constant.
2723 Constant *WideC;
2724 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2725 return nullptr;
2726 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2727 if (!NarrowC)
2728 return nullptr;
2729 Y = NarrowC;
2730 }
2731
2732 // Swap back now that we found our operands.
2733 if (BO.getOpcode() == Instruction::Sub)
2734 std::swap(X, Y);
2735
2736 // Both operands have narrow versions. Last step: the math must not overflow
2737 // in the narrow width.
2738 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2739 return nullptr;
2740
2741 // bo (ext X), (ext Y) --> ext (bo X, Y)
2742 // bo (ext X), C --> ext (bo X, C')
2743 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2744 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2745 if (IsSext)
2746 NewBinOp->setHasNoSignedWrap();
2747 else
2748 NewBinOp->setHasNoUnsignedWrap();
2749 }
2750 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2751}
2752
2753/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2754/// transform.
2755 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2756 GEPOperator &GEP2) {
2757 return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2758 }
2759
2760/// Thread a GEP operation with constant indices through the constant true/false
2761/// arms of a select.
2763 InstCombiner::BuilderTy &Builder) {
2764 if (!GEP.hasAllConstantIndices())
2765 return nullptr;
2766
2767 Instruction *Sel;
2768 Value *Cond;
2769 Constant *TrueC, *FalseC;
2770 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2771 !match(Sel,
2772 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2773 return nullptr;
2774
2775 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2776 // Propagate 'inbounds' and metadata from existing instructions.
2777 // Note: using IRBuilder to create the constants for efficiency.
2778 SmallVector<Value *, 4> IndexC(GEP.indices());
2779 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2780 Type *Ty = GEP.getSourceElementType();
2781 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2782 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2783 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2784}
2785
2786// Canonicalization:
2787// gep T, (gep i8, base, C1), (Index + C2) into
2788// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
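// E.g. (illustrative) with T = i32, so sizeof(T) = 4:
//   gep i32, (gep i8, %base, 8), (add %i, 3)
// becomes
//   gep i32, (gep i8, %base, 20), %i   ; 8 + 3 * 4 = 20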
2789 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2790 GEPOperator *Src,
2791 InstCombinerImpl &IC) {
2792 if (GEP.getNumIndices() != 1)
2793 return nullptr;
2794 auto &DL = IC.getDataLayout();
2795 Value *Base;
2796 const APInt *C1;
2797 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2798 return nullptr;
2799 Value *VarIndex;
2800 const APInt *C2;
2801 Type *PtrTy = Src->getType()->getScalarType();
2802 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2803 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2804 return nullptr;
2805 if (C1->getBitWidth() != IndexSizeInBits ||
2806 C2->getBitWidth() != IndexSizeInBits)
2807 return nullptr;
2808 Type *BaseType = GEP.getSourceElementType();
2809 if (BaseType->isScalableTy())
2810 return nullptr;
2811 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2812 APInt NewOffset = TypeSize * *C2 + *C1;
2813 if (NewOffset.isZero() ||
2814 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2815 GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2816 if (GEP.hasNoUnsignedWrap() &&
2817 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2818 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2819 Flags |= GEPNoWrapFlags::noUnsignedWrap();
2820 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2821 Flags |= GEPNoWrapFlags::inBounds();
2822 }
2823
2824 Value *GEPConst =
2825 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2826 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2827 }
2828
2829 return nullptr;
2830}
2831
2832/// Combine constant offsets separated by variable offsets.
2833/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
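/// E.g. (illustrative):
///   %a = ptradd %p, 4
///   %b = ptradd %a, %x
///   %c = ptradd %b, 8
/// -->
///   %b.new = ptradd %p, %x
///   %c     = ptradd %b.new, 12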
2834 static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2835 InstCombinerImpl &IC) {
2836 if (!GEP.hasAllConstantIndices())
2837 return nullptr;
2838
2839 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2840 SmallVector<GetElementPtrInst *> Skipped;
2841 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2842 while (true) {
2843 if (!InnerGEP)
2844 return nullptr;
2845
2846 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2847 if (InnerGEP->hasAllConstantIndices())
2848 break;
2849
2850 if (!InnerGEP->hasOneUse())
2851 return nullptr;
2852
2853 Skipped.push_back(InnerGEP);
2854 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2855 }
2856
2857 // The two constant offset GEPs are directly adjacent: Let normal offset
2858 // merging handle it.
2859 if (Skipped.empty())
2860 return nullptr;
2861
2862 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2863 // if profitable.
2864 if (!InnerGEP->hasOneUse())
2865 return nullptr;
2866
2867 // Don't bother with vector splats.
2868 Type *Ty = GEP.getType();
2869 if (InnerGEP->getType() != Ty)
2870 return nullptr;
2871
2872 const DataLayout &DL = IC.getDataLayout();
2873 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2874 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2875 !InnerGEP->accumulateConstantOffset(DL, Offset))
2876 return nullptr;
2877
2878 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2879 for (GetElementPtrInst *SkippedGEP : Skipped)
2880 SkippedGEP->setNoWrapFlags(NW);
2881
2882 return IC.replaceInstUsesWith(
2883 GEP,
2884 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2885 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2886}
2887
2888 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2889 GEPOperator *Src) {
2890 // Combine Indices - If the source pointer to this getelementptr instruction
2891 // is a getelementptr instruction with matching element type, combine the
2892 // indices of the two getelementptr instructions into a single instruction.
2893 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2894 return nullptr;
2895
2896 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2897 return I;
2898
2899 if (auto *I = combineConstantOffsets(GEP, *this))
2900 return I;
2901
2902 if (Src->getResultElementType() != GEP.getSourceElementType())
2903 return nullptr;
2904
2905 // Fold chained GEP with constant base into single GEP:
2906 // gep i8, (gep i8, %base, C1), (select Cond, C2, C3)
2907 // -> gep i8, %base, (select Cond, C1+C2, C1+C3)
2908 if (Src->hasOneUse() && GEP.getNumIndices() == 1 &&
2909 Src->getNumIndices() == 1) {
2910 Value *SrcIdx = *Src->idx_begin();
2911 Value *GEPIdx = *GEP.idx_begin();
2912 const APInt *ConstOffset, *TrueVal, *FalseVal;
2913 Value *Cond;
2914
2915 if ((match(SrcIdx, m_APInt(ConstOffset)) &&
2916 match(GEPIdx,
2917 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal)))) ||
2918 (match(GEPIdx, m_APInt(ConstOffset)) &&
2919 match(SrcIdx,
2920 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal))))) {
2921 auto *Select = isa<SelectInst>(GEPIdx) ? cast<SelectInst>(GEPIdx)
2922 : cast<SelectInst>(SrcIdx);
2923
2924 // Make sure the select has only one use.
2925 if (!Select->hasOneUse())
2926 return nullptr;
2927
2928 if (TrueVal->getBitWidth() != ConstOffset->getBitWidth() ||
2929 FalseVal->getBitWidth() != ConstOffset->getBitWidth())
2930 return nullptr;
2931
2932 APInt NewTrueVal = *ConstOffset + *TrueVal;
2933 APInt NewFalseVal = *ConstOffset + *FalseVal;
2934 Constant *NewTrue = ConstantInt::get(Select->getType(), NewTrueVal);
2935 Constant *NewFalse = ConstantInt::get(Select->getType(), NewFalseVal);
2936 Value *NewSelect = Builder.CreateSelect(
2937 Cond, NewTrue, NewFalse, /*Name=*/"",
2938 /*MDFrom=*/(ProfcheckDisableMetadataFixes ? nullptr : Select));
2939 GEPNoWrapFlags Flags =
2940 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2941 return replaceInstUsesWith(GEP,
2942 Builder.CreateGEP(GEP.getResultElementType(),
2943 Src->getPointerOperand(),
2944 NewSelect, "", Flags));
2945 }
2946 }
2947
2948 // Find out whether the last index in the source GEP is a sequential idx.
2949 bool EndsWithSequential = false;
2950 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2951 I != E; ++I)
2952 EndsWithSequential = I.isSequential();
2953 if (!EndsWithSequential)
2954 return nullptr;
2955
2956 // Replace: gep (gep %P, long B), long A, ...
2957 // With: T = long A+B; gep %P, T, ...
2958 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2959 Value *GO1 = GEP.getOperand(1);
2960
2961 // If they aren't the same type, then the input hasn't been processed
2962 // by the loop above yet (which canonicalizes sequential index types to
2963 // intptr_t). Just avoid transforming this until the input has been
2964 // normalized.
2965 if (SO1->getType() != GO1->getType())
2966 return nullptr;
2967
2968 Value *Sum =
2969 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2970 // Only do the combine when we are sure the cost after the
2971 // merge is never more than that before the merge.
2972 if (Sum == nullptr)
2973 return nullptr;
2974
2975 SmallVector<Value *, 8> Indices;
2976 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2977 Indices.push_back(Sum);
2978 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2979
2980 // Don't create GEPs with more than one non-zero index.
2981 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2982 auto *C = dyn_cast<Constant>(Idx);
2983 return !C || !C->isNullValue();
2984 });
2985 if (NumNonZeroIndices > 1)
2986 return nullptr;
2987
2988 return replaceInstUsesWith(
2989 GEP, Builder.CreateGEP(
2990 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2991 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2992}
2993
2994 Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2995 BuilderTy *Builder,
2996 bool &DoesConsume, unsigned Depth) {
2997 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2998 // ~(~(X)) -> X.
2999 Value *A, *B;
3000 if (match(V, m_Not(m_Value(A)))) {
3001 DoesConsume = true;
3002 return A;
3003 }
3004
3005 Constant *C;
3006 // Constants can be considered to be not'ed values.
3007 if (match(V, m_ImmConstant(C)))
3008 return ConstantExpr::getNot(C);
3009
3010 if (Depth++ >= MaxAnalysisRecursionDepth)
3011 return nullptr;
3012
3013 // The rest of the cases require that we invert all uses so don't bother
3014 // doing the analysis if we know we can't use the result.
3015 if (!WillInvertAllUses)
3016 return nullptr;
3017
3018 // Compares can be inverted if all of their uses are being modified to use
3019 // the ~V.
3020 if (auto *I = dyn_cast<CmpInst>(V)) {
3021 if (Builder != nullptr)
3022 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
3023 I->getOperand(1));
3024 return NonNull;
3025 }
3026
3027 // If `V` is of the form `A + B` then `-1 - V` can be folded into
3028 // `(-1 - B) - A` if we are willing to invert all of the uses.
3029 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
3030 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3031 DoesConsume, Depth))
3032 return Builder ? Builder->CreateSub(BV, A) : NonNull;
3033 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3034 DoesConsume, Depth))
3035 return Builder ? Builder->CreateSub(AV, B) : NonNull;
3036 return nullptr;
3037 }
3038
3039 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
3040 // into `A ^ B` if we are willing to invert all of the uses.
3041 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
3042 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3043 DoesConsume, Depth))
3044 return Builder ? Builder->CreateXor(A, BV) : NonNull;
3045 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3046 DoesConsume, Depth))
3047 return Builder ? Builder->CreateXor(AV, B) : NonNull;
3048 return nullptr;
3049 }
3050
3051 // If `V` is of the form `B - A` then `-1 - V` can be folded into
3052 // `A + (-1 - B)` if we are willing to invert all of the uses.
3053 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
3054 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3055 DoesConsume, Depth))
3056 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
3057 return nullptr;
3058 }
3059
3060 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
3061 // into `A s>> B` if we are willing to invert all of the uses.
3062 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
3063 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3064 DoesConsume, Depth))
3065 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
3066 return nullptr;
3067 }
3068
3069 Value *Cond;
3070 // LogicOps are special in that we canonicalize them at the cost of an
3071 // instruction.
3072 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
3073 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
3074 // Selects/min/max with invertible operands are freely invertible
3075 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
3076 bool LocalDoesConsume = DoesConsume;
3077 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
3078 LocalDoesConsume, Depth))
3079 return nullptr;
3080 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3081 LocalDoesConsume, Depth)) {
3082 DoesConsume = LocalDoesConsume;
3083 if (Builder != nullptr) {
3084 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3085 DoesConsume, Depth);
3086 assert(NotB != nullptr &&
3087 "Unable to build inverted value for known freely invertable op");
3088 if (auto *II = dyn_cast<IntrinsicInst>(V))
3089 return Builder->CreateBinaryIntrinsic(
3090 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
3091 return Builder->CreateSelect(
3092 Cond, NotA, NotB, "",
3093 ProfcheckDisableMetadataFixes ? nullptr : cast<SelectInst>(V));
3094 }
3095 return NonNull;
3096 }
3097 }
3098
3099 if (PHINode *PN = dyn_cast<PHINode>(V)) {
3100 bool LocalDoesConsume = DoesConsume;
3101 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
3102 for (Use &U : PN->operands()) {
3103 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3104 Value *NewIncomingVal = getFreelyInvertedImpl(
3105 U.get(), /*WillInvertAllUses=*/false,
3106 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
3107 if (NewIncomingVal == nullptr)
3108 return nullptr;
3109 // Make sure that we can safely erase the original PHI node.
3110 if (NewIncomingVal == V)
3111 return nullptr;
3112 if (Builder != nullptr)
3113 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
3114 }
3115
3116 DoesConsume = LocalDoesConsume;
3117 if (Builder != nullptr) {
3118 IRBuilderBase::InsertPointGuard Guard(*Builder);
3119 Builder->SetInsertPoint(PN);
3120 PHINode *NewPN =
3121 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3122 for (auto [Val, Pred] : IncomingValues)
3123 NewPN->addIncoming(Val, Pred);
3124 return NewPN;
3125 }
3126 return NonNull;
3127 }
3128
3129 if (match(V, m_SExtLike(m_Value(A)))) {
3130 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3131 DoesConsume, Depth))
3132 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3133 return nullptr;
3134 }
3135
3136 if (match(V, m_Trunc(m_Value(A)))) {
3137 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3138 DoesConsume, Depth))
3139 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3140 return nullptr;
3141 }
3142
3143 // De Morgan's Laws:
3144 // (~(A | B)) -> (~A & ~B)
3145 // (~(A & B)) -> (~A | ~B)
3146 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3147 bool IsLogical, Value *A,
3148 Value *B) -> Value * {
3149 bool LocalDoesConsume = DoesConsume;
3150 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3151 LocalDoesConsume, Depth))
3152 return nullptr;
3153 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3154 LocalDoesConsume, Depth)) {
3155 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3156 LocalDoesConsume, Depth);
3157 DoesConsume = LocalDoesConsume;
3158 if (IsLogical)
3159 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3160 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3161 }
3162
3163 return nullptr;
3164 };
3165
3166 if (match(V, m_Or(m_Value(A), m_Value(B))))
3167 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3168 B);
3169
3170 if (match(V, m_And(m_Value(A), m_Value(B))))
3171 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3172 B);
3173
3174 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3175 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3176 B);
3177
3178 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3179 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3180 B);
3181
3182 return nullptr;
3183}
3184
3185/// Return true if we should canonicalize the gep to an i8 ptradd.
3186 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
3187 Value *PtrOp = GEP.getOperand(0);
3188 Type *GEPEltType = GEP.getSourceElementType();
3189 if (GEPEltType->isIntegerTy(8))
3190 return false;
3191
3192 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3193 // intrinsic. This has better support in BasicAA.
3194 if (GEPEltType->isScalableTy())
3195 return true;
3196
3197 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3198 // together.
3199 if (GEP.getNumIndices() == 1 &&
3200 match(GEP.getOperand(1),
3201 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
3202 m_Shl(m_Value(), m_ConstantInt()))))
3203 return true;
3204
3205 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3206 // possibly be merged together.
3207 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3208 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3209 any_of(GEP.indices(), [](Value *V) {
3210 const APInt *C;
3211 return match(V, m_APInt(C)) && !C->isZero();
3212 });
3213}
3214
3215 static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3216 IRBuilderBase &Builder) {
3217 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3218 if (!Op1)
3219 return nullptr;
3220
3221 // Don't fold a GEP into itself through a PHI node. This can only happen
3222 // through the back-edge of a loop. Folding a GEP into itself means that
3223 // the value of the previous iteration needs to be stored in the meantime,
3224 // thus requiring an additional register variable to be live, but not
3225 // actually achieving anything (the GEP still needs to be executed once per
3226 // loop iteration).
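// E.g. (illustrative): in a loop
//   %p      = phi ptr [ %start, %entry ], [ %p.next, %loop ]
//   %p.next = getelementptr i8, ptr %p, i64 4
// merging %p.next into itself through %p would save nothing: a GEP must
// still execute on every iteration.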
3227 if (Op1 == &GEP)
3228 return nullptr;
3229 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3230
3231 int DI = -1;
3232
3233 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3234 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3235 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3236 Op1->getSourceElementType() != Op2->getSourceElementType())
3237 return nullptr;
3238
3239 // As for Op1 above, don't try to fold a GEP into itself.
3240 if (Op2 == &GEP)
3241 return nullptr;
3242
3243 // Keep track of the type as we walk the GEP.
3244 Type *CurTy = nullptr;
3245
3246 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3247 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3248 return nullptr;
3249
3250 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3251 if (DI == -1) {
3252 // We have not yet seen any differences in the GEPs feeding the
3253 // PHI, so we record this one if it is allowed to be a
3254 // variable.
3255
3256 // The first two operands can vary for any GEP; the rest have to be
3257 // constant when indexing into a struct.
3258 if (J > 1) {
3259 assert(CurTy && "No current type?");
3260 if (CurTy->isStructTy())
3261 return nullptr;
3262 }
3263
3264 DI = J;
3265 } else {
3266 // The GEPs differ in more than one operand. While this could be
3267 // extended to support GEPs that vary by more than one variable, it
3268 // doesn't make sense: it greatly increases the complexity and
3269 // would result in an R+R+R addressing mode, which no backend
3270 // directly supports, so it would need to be broken into several
3271 // simpler instructions anyway.
3272 return nullptr;
3273 }
3274 }
3275
3276 // Sink down a layer of the type for the next iteration.
3277 if (J > 0) {
3278 if (J == 1) {
3279 CurTy = Op1->getSourceElementType();
3280 } else {
3281 CurTy =
3282 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3283 }
3284 }
3285 }
3286
3287 NW &= Op2->getNoWrapFlags();
3288 }
3289
3290 // If not all GEPs are identical we'll have to create a new PHI node.
3291 // Check that the old PHI node has only one use so that it will get
3292 // removed.
3293 if (DI != -1 && !PN->hasOneUse())
3294 return nullptr;
3295
3296 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3297 NewGEP->setNoWrapFlags(NW);
3298
3299 if (DI == -1) {
3300 // All the GEPs feeding the PHI are identical. Clone one down into our
3301 // BB so that it can be merged with the current GEP.
3302 } else {
3303 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3304 // into the current block so it can be merged, and create a new PHI to
3305 // set that index.
3306 PHINode *NewPN;
3307 {
3308 IRBuilderBase::InsertPointGuard Guard(Builder);
3309 Builder.SetInsertPoint(PN);
3310 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3311 PN->getNumOperands());
3312 }
3313
3314 for (auto &I : PN->operands())
3315 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3316 PN->getIncomingBlock(I));
3317
3318 NewGEP->setOperand(DI, NewPN);
3319 }
3320
3321 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3322 return NewGEP;
3323}
3324
3325Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3326 Value *PtrOp = GEP.getOperand(0);
3327 SmallVector<Value *, 8> Indices(GEP.indices());
3328 Type *GEPType = GEP.getType();
3329 Type *GEPEltType = GEP.getSourceElementType();
3330 if (Value *V =
3331 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3332 SQ.getWithInstruction(&GEP)))
3333 return replaceInstUsesWith(GEP, V);
3334
3335 // For vector geps, use the generic demanded vector support.
3336 // Skip if GEP return type is scalable. The number of elements is unknown at
3337 // compile-time.
3338 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3339 auto VWidth = GEPFVTy->getNumElements();
3340 APInt PoisonElts(VWidth, 0);
3341 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3342 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3343 PoisonElts)) {
3344 if (V != &GEP)
3345 return replaceInstUsesWith(GEP, V);
3346 return &GEP;
3347 }
3348 }
3349
3350 // Eliminate unneeded casts for indices, and replace indices which displace
3351 // by multiples of a zero size type with zero.
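// E.g. (illustrative): an index over a zero-size type displaces by 0 bytes
// no matter its value:
//   getelementptr {}, ptr %p, i64 %x  -->  getelementptr {}, ptr %p, i64 0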
3352 bool MadeChange = false;
3353
3354 // Index width may not be the same width as pointer width.
3355 // Data layout chooses the right type based on supported integer types.
3356 Type *NewScalarIndexTy =
3357 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3358
3359 gep_type_iterator GTI = gep_type_begin(GEP);
3360 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3361 ++I, ++GTI) {
3362 // Skip indices into struct types.
3363 if (GTI.isStruct())
3364 continue;
3365
3366 Type *IndexTy = (*I)->getType();
3367 Type *NewIndexType =
3368 IndexTy->isVectorTy()
3369 ? VectorType::get(NewScalarIndexTy,
3370 cast<VectorType>(IndexTy)->getElementCount())
3371 : NewScalarIndexTy;
3372
3373 // If the element type has zero size then any index over it is equivalent
3374 // to an index of zero, so replace it with zero if it is not zero already.
3375 Type *EltTy = GTI.getIndexedType();
3376 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3377 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3378 *I = Constant::getNullValue(NewIndexType);
3379 MadeChange = true;
3380 }
3381
3382 if (IndexTy != NewIndexType) {
3383 // If we are using a wider index than needed for this platform, shrink
3384 // it to what we need. If narrower, sign-extend it to what we need.
3385 // This explicit cast can make subsequent optimizations more obvious.
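// E.g. (illustrative) with a 64-bit index type:
//   getelementptr i32, ptr %p, i16 %i    ; %i is sign-extended to i64
//   getelementptr i32, ptr %p, i128 %j   ; %j is truncated to i64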
3386 if (IndexTy->getScalarSizeInBits() <
3387 NewIndexType->getScalarSizeInBits()) {
3388 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3389 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3390 else
3391 *I = Builder.CreateSExt(*I, NewIndexType);
3392 } else {
3393 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3394 GEP.hasNoUnsignedSignedWrap());
3395 }
3396 MadeChange = true;
3397 }
3398 }
3399 if (MadeChange)
3400 return &GEP;
3401
3402 // Canonicalize constant GEPs to i8 type.
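// E.g. (illustrative):
//   getelementptr inbounds i32, ptr %p, i64 3
// becomes the equivalent byte-based form
//   getelementptr inbounds i8, ptr %p, i64 12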
3403 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3404 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3405 if (GEP.accumulateConstantOffset(DL, Offset))
3406 return replaceInstUsesWith(
3407 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3408 GEP.getNoWrapFlags()));
3409 }
3410
3411 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
3412 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3413 Value *NewGEP =
3414 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3415 return replaceInstUsesWith(GEP, NewGEP);
3416 }
3417
3418 // Strip trailing zero indices.
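// E.g. (illustrative): the trailing zero index does not change the address:
//   getelementptr [8 x i32], ptr %p, i64 %i, i64 0
// -->
//   getelementptr [8 x i32], ptr %p, i64 %i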
3419 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3420 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3421 return replaceInstUsesWith(
3422 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3423 drop_end(Indices), "", GEP.getNoWrapFlags()));
3424 }
3425
3426 // Strip leading zero indices.
3427 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3428 if (FirstIdx && FirstIdx->isNullValue() &&
3429 !FirstIdx->getType()->isVectorTy()) {
3430 gep_type_iterator GTI = gep_type_begin(GEP);
3431 ++GTI;
3432 if (!GTI.isStruct())
3433 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3434 GEP.getPointerOperand(),
3435 drop_begin(Indices), "",
3436 GEP.getNoWrapFlags()));
3437 }
3438
3439 // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3440 // Note that this loses information about undef lanes; we run it after
3441 // demanded bits to partially mitigate that loss.
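// E.g. (illustrative):
//   %g = getelementptr i32, ptr %p, <4 x i64> splat (i64 %x)
// becomes a scalar gep that is then re-splatted:
//   %s = getelementptr i32, ptr %p, i64 %x
//   %g = <4 x ptr> splat of %s (insertelement + shufflevector)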
3442 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3443 return Op->getType()->isVectorTy() && getSplatValue(Op);
3444 })) {
3445 SmallVector<Value *> NewOps;
3446 for (auto &Op : GEP.operands()) {
3447 if (Op->getType()->isVectorTy())
3448 if (Value *Scalar = getSplatValue(Op)) {
3449 NewOps.push_back(Scalar);
3450 continue;
3451 }
3452 NewOps.push_back(Op);
3453 }
3454
3455 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3456 ArrayRef(NewOps).drop_front(), GEP.getName(),
3457 GEP.getNoWrapFlags());
3458 if (!Res->getType()->isVectorTy()) {
3459 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3460 Res = Builder.CreateVectorSplat(EC, Res);
3461 }
3462 return replaceInstUsesWith(GEP, Res);
3463 }
3464
3465 bool SeenNonZeroIndex = false;
3466 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3467 // Ignore one leading zero index.
3468 auto *C = dyn_cast<Constant>(Idx);
3469 if (C && C->isNullValue() && IdxNum == 0)
3470 continue;
3471
3472 if (!SeenNonZeroIndex) {
3473 SeenNonZeroIndex = true;
3474 continue;
3475 }
3476
3477 // GEP has multiple non-zero indices: Split it.
3478 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3479 Value *FrontGEP =
3480 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3481 GEP.getName() + ".split", GEP.getNoWrapFlags());
3482
3483 SmallVector<Value *> BackIndices;
3484 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3485 append_range(BackIndices, drop_begin(Indices, IdxNum));
3486 return GetElementPtrInst::Create(
3487 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3488 BackIndices, GEP.getNoWrapFlags());
3489 }
3490
3491 // Check to see if the inputs to the PHI node are getelementptr instructions.
3492 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3493 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3494 return replaceOperand(GEP, 0, NewPtrOp);
3495 }
3496
3497 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3498 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3499 return I;
3500
3501 if (GEP.getNumIndices() == 1) {
3502 unsigned AS = GEP.getPointerAddressSpace();
3503 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3504 DL.getIndexSizeInBits(AS)) {
3505 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3506
3507 if (TyAllocSize == 1) {
3508 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3509 // but only if the result pointer is only used as if it were an integer.
3510 // (The case where the underlying object is the same is handled by
3511 // InstSimplify.)
3512 Value *X = GEP.getPointerOperand();
3513 Value *Y;
3514 if (match(GEP.getOperand(1), m_Sub(m_PtrToIntOrAddr(m_Value(Y)),
3515 m_PtrToIntOrAddr(m_Specific(X)))) &&
3516 GEPType == Y->getType()) {
3517 bool HasNonAddressBits =
3518 DL.getAddressSizeInBits(AS) != DL.getPointerSizeInBits(AS);
3519 bool Changed = GEP.replaceUsesWithIf(Y, [&](Use &U) {
3520 return isa<PtrToAddrInst, ICmpInst>(U.getUser()) ||
3521 (!HasNonAddressBits && isa<PtrToIntInst>(U.getUser()));
3522 });
3523 return Changed ? &GEP : nullptr;
3524 }
3525 } else if (auto *ExactIns =
3526 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3527 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3528 Value *V;
3529 if (ExactIns->isExact()) {
3530 if ((has_single_bit(TyAllocSize) &&
3531 match(GEP.getOperand(1),
3532 m_Shr(m_Value(V),
3533 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3534 match(GEP.getOperand(1),
3535 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3536 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3537 GEP.getPointerOperand(), V,
3538 GEP.getNoWrapFlags());
3539 }
3540 }
3541 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3542 // Try to canonicalize a non-i8 element type to i8 when the index is an
3543 // exact instruction (div/shr) with a constant RHS: we can then fold the
3544 // non-i8 element scale into the div/shr (similar to the mul case, just
3545 // inverted).
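// E.g. (illustrative):
//   %i = udiv exact i64 %v, 12
//   %g = getelementptr i32, ptr %p, i64 %i
// becomes
//   %i2 = udiv exact i64 %v, 3
//   %g  = getelementptr i8, ptr %p, i64 %i2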
3546 const APInt *C;
3547 std::optional<APInt> NewC;
3548 if (has_single_bit(TyAllocSize) &&
3549 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3550 C->uge(countr_zero(TyAllocSize)))
3551 NewC = *C - countr_zero(TyAllocSize);
3552 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3553 APInt Quot;
3554 uint64_t Rem;
3555 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3556 if (Rem == 0)
3557 NewC = Quot;
3558 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3559 APInt Quot;
3560 int64_t Rem;
3561 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3562 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3563 if (!Quot.isAllOnes() && Rem == 0)
3564 NewC = Quot;
3565 }
3566
3567 if (NewC.has_value()) {
3568 Value *NewOp = Builder.CreateBinOp(
3569 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3570 ConstantInt::get(V->getType(), *NewC));
3571 cast<BinaryOperator>(NewOp)->setIsExact();
3572 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3573 GEP.getPointerOperand(), NewOp,
3574 GEP.getNoWrapFlags());
3575 }
3576 }
3577 }
3578 }
3579 }
3580 // We do not handle pointer-vector geps here.
3581 if (GEPType->isVectorTy())
3582 return nullptr;
3583
3584 if (!GEP.isInBounds()) {
3585 unsigned IdxWidth =
3586 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3587 APInt BasePtrOffset(IdxWidth, 0);
3588 Value *UnderlyingPtrOp =
3589 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3590 bool CanBeNull, CanBeFreed;
3591 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3592 DL, CanBeNull, CanBeFreed);
3593 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3594 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3595 BasePtrOffset.isNonNegative()) {
3596 APInt AllocSize(IdxWidth, DerefBytes);
3597 if (BasePtrOffset.ule(AllocSize)) {
3598 return GetElementPtrInst::CreateInBounds(
3599 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3600 }
3601 }
3602 }
3603 }
3604
3605 // nusw + nneg -> nuw
3606 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3607 all_of(GEP.indices(), [&](Value *Idx) {
3608 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3609 })) {
3610 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3611 return &GEP;
3612 }
3613
3614 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3615 // to do this after having tried to derive "nuw" above.
3616 if (GEP.getNumIndices() == 1) {
3617 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3618 // geps if transforming into (gep (gep p, x), y).
3619 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3620 // We can preserve "inbounds nuw", "nusw nuw", and "nuw" if we know
3621 // that x + y does not have unsigned wrap.
3622 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3623 return GEP.getNoWrapFlags();
3624 return GEPNoWrapFlags::none();
3625 };
3626
3627 // Try to replace ADD + GEP with GEP + GEP.
3628 Value *Idx1, *Idx2;
3629 if (match(GEP.getOperand(1),
3630 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3631 // %idx = add i64 %idx1, %idx2
3632 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3633 // as:
3634 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3635 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3636 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3637 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3638 auto *NewPtr =
3639 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3640 Idx1, "", NWFlags);
3641 return replaceInstUsesWith(GEP,
3642 Builder.CreateGEP(GEP.getSourceElementType(),
3643 NewPtr, Idx2, "", NWFlags));
3644 }
3645 ConstantInt *C;
3646 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3647 m_Value(Idx1), m_ConstantInt(C))))))) {
3648 // %add = add nsw i32 %idx1, idx2
3649 // %sidx = sext i32 %add to i64
3650 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3651 // as:
3652 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3653 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3654 bool NUW = match(GEP.getOperand(1),
3655 m_NNegZExt(m_NUWAddLike(m_Value(), m_Value())));
3656 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3657 auto *NewPtr = Builder.CreateGEP(
3658 GEP.getSourceElementType(), GEP.getPointerOperand(),
3659 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3660 return replaceInstUsesWith(
3661 GEP,
3662 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3663 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3664 "", NWFlags));
3665 }
3666 }
3667
3668 if (Instruction *R = foldSelectGEP(GEP, Builder))
3669 return R;
3670
3671 // srem -> (and/urem) for inbounds+nuw GEP
3672 if (Indices.size() == 1 && GEP.isInBounds() && GEP.hasNoUnsignedWrap()) {
3673 Value *X, *Y;
3674
3675 // Match: idx = srem X, Y -- where Y is a power-of-two value.
3676 if (match(Indices[0], m_OneUse(m_SRem(m_Value(X), m_Value(Y)))) &&
3677 isKnownToBeAPowerOfTwo(Y, /*OrZero=*/true, &GEP)) {
3678 // If GEP is inbounds+nuw, the offset cannot be negative
3679 // -> srem by power-of-two can be treated as urem,
3680 // and urem by power-of-two folds to 'and' later.
3681 // OrZero=true is fine here because division by zero is UB.
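// E.g. (illustrative):
//   %i = srem i64 %x, 8
//   %g = getelementptr inbounds nuw i32, ptr %p, i64 %i
// can use "%i = urem i64 %x, 8" instead, which later folds to
// "and i64 %x, 7".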
3682 Instruction *OldIdxI = cast<Instruction>(Indices[0]);
3683 Value *NewIdx = Builder.CreateURem(X, Y, OldIdxI->getName());
3684
3685 return GetElementPtrInst::Create(GEPEltType, PtrOp, {NewIdx},
3686 GEP.getNoWrapFlags());
3687 }
3688 }
3689
3690 return nullptr;
3691}
3692
3693static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3694 Instruction *AI) {
3695 if (isa<ConstantPointerNull>(V))
3696 return true;
3697 if (auto *LI = dyn_cast<LoadInst>(V))
3698 return isa<GlobalVariable>(LI->getPointerOperand());
3699 // Two distinct allocations will never be equal.
3700 return isAllocLikeFn(V, &TLI) && V != AI;
3701}
3702
3703/// Given a call CB which uses an address UsedV, return true if we can prove the
3704/// call's only possible effect is storing to UsedV.
3705static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3706 const TargetLibraryInfo &TLI) {
3707 if (!CB.use_empty())
3708 // TODO: add recursion if returned attribute is present
3709 return false;
3710
3711 if (CB.isTerminator())
3712 // TODO: remove implementation restriction
3713 return false;
3714
3715 if (!CB.willReturn() || !CB.doesNotThrow())
3716 return false;
3717
3718 // If the only possible side effect of the call is writing to the alloca,
3719 // and the result isn't used, we can safely remove any reads implied by the
3720 // call including those which might read the alloca itself.
3721 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3722 return Dest && Dest->Ptr == UsedV;
3723}
3724
3725static std::optional<ModRefInfo>
3726isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3727 const TargetLibraryInfo &TLI, bool KnowInit) {
3728 SmallVector<Instruction *, 4> Worklist;
3729 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3730 Worklist.push_back(AI);
3731 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Mod;
3732
3733 do {
3734 Instruction *PI = Worklist.pop_back_val();
3735 for (User *U : PI->users()) {
3736 Instruction *I = cast<Instruction>(U);
3737 switch (I->getOpcode()) {
3738 default:
3739 // Give up the moment we see something we can't handle.
3740 return std::nullopt;
3741
3742 case Instruction::AddrSpaceCast:
3743 case Instruction::BitCast:
3744 case Instruction::GetElementPtr:
3745 Users.emplace_back(I);
3746 Worklist.push_back(I);
3747 continue;
3748
3749 case Instruction::ICmp: {
3750 ICmpInst *ICI = cast<ICmpInst>(I);
3751 // We can fold eq/ne comparisons with null to false/true, respectively.
3752 // We also fold comparisons in some conditions provided the alloc has
3753 // not escaped (see isNeverEqualToUnescapedAlloc).
3754 if (!ICI->isEquality())
3755 return std::nullopt;
3756 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3757 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3758 return std::nullopt;
3759
3760 // Do not fold compares to aligned_alloc calls, as they may have to
3761 // return null in case the required alignment cannot be satisfied,
3762 // unless we can prove that both alignment and size are valid.
3763 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3764 // Check if the alignment and size of a call to aligned_alloc are valid;
3765 // that is, the alignment is a power-of-2 and the size is a multiple of
3766 // the alignment.
3767 const APInt *Alignment;
3768 const APInt *Size;
3769 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3770 match(CB->getArgOperand(1), m_APInt(Size)) &&
3771 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3772 };
3773 auto *CB = dyn_cast<CallBase>(AI);
3774 LibFunc TheLibFunc;
3775 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3776 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3777 !AlignmentAndSizeKnownValid(CB))
3778 return std::nullopt;
3779 Users.emplace_back(I);
3780 continue;
3781 }
3782
3783 case Instruction::Call:
3784 // Ignore no-op and store intrinsics.
3785 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3786 switch (II->getIntrinsicID()) {
3787 default:
3788 return std::nullopt;
3789
3790 case Intrinsic::memmove:
3791 case Intrinsic::memcpy:
3792 case Intrinsic::memset: {
3793 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3794 if (MI->isVolatile())
3795 return std::nullopt;
3796 // Note: this could also be ModRef, but we can still interpret that
3797 // as just Mod in that case.
3798 ModRefInfo NewAccess =
3799 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3800 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3801 return std::nullopt;
3802 Access |= NewAccess;
3803 [[fallthrough]];
3804 }
3805 case Intrinsic::assume:
3806 case Intrinsic::invariant_start:
3807 case Intrinsic::invariant_end:
3808 case Intrinsic::lifetime_start:
3809 case Intrinsic::lifetime_end:
3810 case Intrinsic::objectsize:
3811 Users.emplace_back(I);
3812 continue;
3813 case Intrinsic::launder_invariant_group:
3814 case Intrinsic::strip_invariant_group:
3815 Users.emplace_back(I);
3816 Worklist.push_back(I);
3817 continue;
3818 }
3819 }
3820
3821 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3822 getAllocationFamily(I, &TLI) == Family) {
3823 Users.emplace_back(I);
3824 continue;
3825 }
3826
3827 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3828 getAllocationFamily(I, &TLI) == Family) {
3829 Users.emplace_back(I);
3830 Worklist.push_back(I);
3831 continue;
3832 }
3833
3834 if (!isRefSet(Access) &&
3835 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3836 Access |= ModRefInfo::Mod;
3837 Users.emplace_back(I);
3838 continue;
3839 }
3840
3841 return std::nullopt;
3842
3843 case Instruction::Store: {
3844 StoreInst *SI = cast<StoreInst>(I);
3845 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3846 return std::nullopt;
3847 if (isRefSet(Access))
3848 return std::nullopt;
3849 Access |= ModRefInfo::Mod;
3850 Users.emplace_back(I);
3851 continue;
3852 }
3853
3854 case Instruction::Load: {
3855 LoadInst *LI = cast<LoadInst>(I);
3856 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3857 return std::nullopt;
3858 if (isModSet(Access))
3859 return std::nullopt;
3860 Access |= ModRefInfo::Ref;
3861 Users.emplace_back(I);
3862 continue;
3863 }
3864 }
3865 llvm_unreachable("missing a return?");
3866 }
3867 } while (!Worklist.empty());
3868
3870 return Access;
3871}
3872
3873Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3874 SmallVector<WeakTrackingVH, 64> Users;
3875
3876 // If we have a malloc call which is used only in comparisons to null and in
3877 // free calls, delete the calls and replace the comparisons with true or
3878 // false as appropriate.
3879
3880 // This is based on the principle that we can substitute our own allocation
3881 // function (which will never return null) rather than knowledge of the
3882 // specific function being called. In some sense this can change the permitted
3883 // outputs of a program (when we convert a malloc to an alloca, the fact that
3884 // the allocation is now on the stack is potentially visible, for example),
3885 // but we believe it does so in a permissible manner.
3887
3888 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3889 // before each store.
3890 SmallVector<DbgVariableRecord *, 8> DVRs;
3891 std::unique_ptr<DIBuilder> DIB;
3892 if (isa<AllocaInst>(MI)) {
3893 findDbgUsers(&MI, DVRs);
3894 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3895 }
3896
3897 // Determine what getInitialValueOfAllocation would return without actually
3898 // allocating the result.
3899 bool KnowInitUndef = false;
3900 bool KnowInitZero = false;
3901 Constant *Init =
3902 getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3903 if (Init) {
3904 if (isa<UndefValue>(Init))
3905 KnowInitUndef = true;
3906 else if (Init->isNullValue())
3907 KnowInitZero = true;
3908 }
3909 // The various sanitizers don't actually return undef memory, but rather
3910 // memory initialized with special forms of runtime poison.
3911 auto &F = *MI.getFunction();
3912 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3913 F.hasFnAttribute(Attribute::SanitizeAddress))
3914 KnowInitUndef = false;
3915
3916 auto Removable =
3917 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3918 if (Removable) {
3919 for (WeakTrackingVH &User : Users) {
3920 // Lower all @llvm.objectsize and MTI calls first because they may use
3921 // a bitcast/GEP of the alloca we are removing.
3922 if (!User)
3923 continue;
3924
3925 Instruction *I = cast<Instruction>(&*User);
3926
3927 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
3928 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3929 SmallVector<Instruction *> InsertedInstructions;
3930 Value *Result = lowerObjectSizeCall(
3931 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3932 for (Instruction *Inserted : InsertedInstructions)
3933 Worklist.add(Inserted);
3934 replaceInstUsesWith(*I, Result);
3935 eraseInstFromFunction(*I);
3936 User = nullptr; // Skip examining in the next loop.
3937 continue;
3938 }
3939 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3940 if (KnowInitZero && isRefSet(*Removable)) {
3941 IRBuilderBase::InsertPointGuard Guard(Builder);
3942 Builder.SetInsertPoint(MTI);
3943 auto *M = Builder.CreateMemSet(
3944 MTI->getRawDest(),
3945 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3946 MTI->getLength(), MTI->getDestAlign());
3947 M->copyMetadata(*MTI);
3948 }
3949 }
3950 }
3951 }
3952 for (WeakTrackingVH &User : Users) {
3953 if (!User)
3954 continue;
3955
3956 Instruction *I = cast<Instruction>(&*User);
3957
3958 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3959 replaceInstUsesWith(*C,
3960 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3961 C->isFalseWhenEqual()));
3962 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3963 for (auto *DVR : DVRs)
3964 if (DVR->isAddressOfVariable())
3965 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3966 } else {
3967 // Casts, GEP, or anything else: we're about to delete this instruction,
3968 // so it can not have any valid uses.
3969 Constant *Replace;
3970 if (isa<LoadInst>(I)) {
3971 assert(KnowInitZero || KnowInitUndef);
3972 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3973 : Constant::getNullValue(I->getType());
3974 } else
3975 Replace = PoisonValue::get(I->getType());
3976 replaceInstUsesWith(*I, Replace);
3977 }
3978 eraseInstFromFunction(*I);
3979 }
3980
3981 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3982 // Replace invoke with a NOP intrinsic to maintain the original CFG
3983 Module *M = II->getModule();
3984 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3985 auto *NewII = InvokeInst::Create(
3986 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3987 NewII->setDebugLoc(II->getDebugLoc());
3988 }
3989
3990 // Remove debug intrinsics which describe the value contained within the
3991 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3992 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3993 //
3994 // ```
3995 // define void @foo(i32 %0) {
3996 // %a = alloca i32 ; Deleted.
3997 // store i32 %0, i32* %a
3998 // dbg.value(i32 %0, "arg0") ; Not deleted.
3999 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
4000 // call void @trivially_inlinable_no_op(i32* %a)
4001 // ret void
4002 // }
4003 // ```
4004 //
4005 // This may not be required if we stop describing the contents of allocas
4006 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
4007 // the LowerDbgDeclare utility.
4008 //
4009 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
4010 // "arg0" dbg.value may be stale after the call. However, failing to remove
4011 // the DW_OP_deref dbg.value causes large gaps in location coverage.
4012 //
4013 // FIXME: the Assignment Tracking project has now likely made this
4014 // redundant (and it's sometimes harmful).
4015 for (auto *DVR : DVRs)
4016 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
4017 DVR->eraseFromParent();
4018
4019 return eraseInstFromFunction(MI);
4020 }
4021 return nullptr;
4022}
4023
4024/// Move the call to free before a NULL test.
4025///
4026/// Check if this free is accessed after its argument has been tested
4027/// against NULL (property 0).
4028/// If yes, it is legal to move this call into its predecessor block.
4029///
4030/// The move is performed only if the block containing the call to free
4031/// will be removed, i.e.:
4032/// 1. it has only one predecessor P, and P has two successors
4033/// 2. it contains the call, noops, and an unconditional branch
4034/// 3. its successor is the same as its predecessor's successor
4035///
4036/// Profitability is not a concern here; this function should
4037/// be called only if the caller knows this transformation would be
4038/// profitable (e.g., for code size).
4039static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
4040 const DataLayout &DL) {
4041 Value *Op = FI.getArgOperand(0);
4042 BasicBlock *FreeInstrBB = FI.getParent();
4043 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
4044
4045 // Validate part of constraint #1: Only one predecessor
4046 // FIXME: We could handle more than one predecessor, but in that case we
4047 // would duplicate the call to free in each predecessor, and it may
4048 // not be profitable even for code size.
4049 if (!PredBB)
4050 return nullptr;
4051
4052 // Validate constraint #2: Does this block contain only the call to
4053 // free, noops, and an unconditional branch?
4054 BasicBlock *SuccBB;
4055 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
4056 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
4057 return nullptr;
4058
4059 // If there are only 2 instructions in the block at this point,
4060 // they are the call to free and the unconditional branch.
4061 // If there are more than 2 instructions, check that the extras are noops,
4062 // i.e., they won't hurt the performance of the generated code.
4063 if (FreeInstrBB->size() != 2) {
4064 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
4065 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
4066 continue;
4067 auto *Cast = dyn_cast<CastInst>(&Inst);
4068 if (!Cast || !Cast->isNoopCast(DL))
4069 return nullptr;
4070 }
4071 }
4072 // Validate the rest of constraint #1 by matching on the pred branch.
4073 Instruction *TI = PredBB->getTerminator();
4074 BasicBlock *TrueBB, *FalseBB;
4075 CmpPredicate Pred;
4076 if (!match(TI, m_Br(m_ICmp(Pred,
4077 m_CombineOr(m_Specific(Op),
4078 m_Specific(Op->stripPointerCasts())),
4079 m_Zero()),
4080 TrueBB, FalseBB)))
4081 return nullptr;
4082 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
4083 return nullptr;
4084
4085 // Validate constraint #3: Ensure the null case just falls through.
4086 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
4087 return nullptr;
4088 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
4089 "Broken CFG: missing edge from predecessor to successor");
4090
4091 // At this point, we know that everything in FreeInstrBB can be moved
4092 // before TI.
4093 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
4094 if (&Instr == FreeInstrBBTerminator)
4095 break;
4096 Instr.moveBeforePreserving(TI->getIterator());
4097 }
4098 assert(FreeInstrBB->size() == 1 &&
4099 "Only the branch instruction should remain");
4100
4101 // Now that we've moved the call to free before the NULL check, we have to
4102 // remove any attributes on its parameter that imply it's non-null, because
4103 // those attributes might have only been valid because of the NULL check, and
4104 // we can get miscompiles if we keep them. This is conservative if non-null is
4105 // also implied by something other than the NULL check, but it's guaranteed to
4106 // be correct, and the conservativeness won't matter in practice, since the
4107 // attributes are irrelevant for the call to free itself and the pointer
4108 // shouldn't be used after the call.
4109 AttributeList Attrs = FI.getAttributes();
4110 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
4111 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4112 if (Dereferenceable.isValid()) {
4113 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
4114 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
4115 Attribute::Dereferenceable);
4116 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
4117 }
4118 FI.setAttributes(Attrs);
4119
4120 return &FI;
4121}
4122
4123Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
4124 // free undef -> unreachable.
4125 if (isa<UndefValue>(Op)) {
4126 // Leave a marker since we can't modify the CFG here.
4127 CreateNonTerminatorUnreachable(&FI);
4128 return eraseInstFromFunction(FI);
4129 }
4130
4131 // If we have 'free null' delete the instruction. This can happen in stl code
4132 // when lots of inlining happens.
4133 if (isa<ConstantPointerNull>(Op))
4134 return eraseInstFromFunction(FI);
4135
4136 // If we had free(realloc(...)) with no intervening uses, then eliminate the
4137 // realloc() entirely.
4138 CallInst *CI = dyn_cast<CallInst>(Op);
4139 if (CI && CI->hasOneUse())
4140 if (Value *ReallocatedOp = getReallocatedOperand(CI))
4141 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
4142
4143 // If we optimize for code size, try to move the call to free before the null
4144 // test so that SimplifyCFG can remove the empty block and dead code
4145 // elimination can remove the branch. I.e., this helps to turn something like:
4146 // if (foo) free(foo);
4147 // into
4148 // free(foo);
4149 //
4150 // Note that we can only do this for 'free' and not for any flavor of
4151 // 'operator delete'; there is no 'operator delete' symbol for which we are
4152 // permitted to invent a call, even if we're passing in a null pointer.
4153 if (MinimizeSize) {
4154 LibFunc Func;
4155 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
4156 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
4157 return I;
4158 }
4159
4160 return nullptr;
4161}
4162
4163Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
4164 Value *RetVal = RI.getReturnValue();
4165 if (!RetVal)
4166 return nullptr;
4167
4168 Function *F = RI.getFunction();
4169 Type *RetTy = RetVal->getType();
4170 if (RetTy->isPointerTy()) {
4171 bool HasDereferenceable =
4172 F->getAttributes().getRetDereferenceableBytes() > 0;
4173 if (F->hasRetAttribute(Attribute::NonNull) ||
4174 (HasDereferenceable &&
4175 !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
4176 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4177 return replaceOperand(RI, 0, V);
4178 }
4179 }
4180
4181 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4182 return nullptr;
4183
4184 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4185 if (ReturnClass == fcNone)
4186 return nullptr;
4187
4188 KnownFPClass KnownClass;
4189 if (SimplifyDemandedFPClass(&RI, 0, ~ReturnClass, KnownClass))
4190 return &RI;
4191
4192 return nullptr;
4193}
4194
4195// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
4197 // Try to remove the previous instruction if it must lead to unreachable.
4198 // This includes instructions like stores and "llvm.assume" that may not get
4199 // removed by simple dead code elimination.
4200 bool Changed = false;
4201 while (Instruction *Prev = I.getPrevNode()) {
4202 // While we theoretically can erase EH, that would result in a block that
4203 // used to start with an EH pad no longer starting with one, which is invalid.
4204 // To make it valid, we'd need to fixup predecessors to no longer refer to
4205 // this block, but that changes CFG, which is not allowed in InstCombine.
4206 if (Prev->isEHPad())
4207 break; // Can not drop any more instructions. We're done here.
4208
4209 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
4210 break; // Can not drop any more instructions. We're done here.
4211 // Otherwise, this instruction can be freely erased,
4212 // even if it is not side-effect free.
4213
4214 // A value may still have uses before we process it here (for example, in
4215 // another unreachable block), so convert those to poison.
4216 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4217 eraseInstFromFunction(*Prev);
4218 Changed = true;
4219 }
4220 return Changed;
4221}
4222
4223Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
4224 removeInstructionsBeforeUnreachable(I);
4225 return nullptr;
4226}
4227
4228Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4229 assert(BI.isUnconditional() && "Only for unconditional branches.");
4230
4231 // If this store is the second-to-last instruction in the basic block
4232 // (excluding debug info) and if the block ends with
4233 // an unconditional branch, try to move the store to the successor block.
4234
4235 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4236 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4237 do {
4238 if (BBI != FirstInstr)
4239 --BBI;
4240 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4241
4242 return dyn_cast<StoreInst>(BBI);
4243 };
4244
4245 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4246 if (mergeStoreIntoSuccessor(*SI))
4247 return &BI;
4248
4249 return nullptr;
4250}
4251
4252void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
4253 SmallVectorImpl<BasicBlock *> &Worklist) {
4254 if (!DeadEdges.insert({From, To}).second)
4255 return;
4256
4257 // Replace phi node operands in successor with poison.
4258 for (PHINode &PN : To->phis())
4259 for (Use &U : PN.incoming_values())
4260 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4261 replaceUse(U, PoisonValue::get(PN.getType()));
4262 addToWorklist(&PN);
4263 MadeIRChange = true;
4264 }
4265
4266 Worklist.push_back(To);
4267}
4268
4269// Under the assumption that I is unreachable, remove it and the following
4270// instructions. Changes are reported directly to MadeIRChange.
4273 BasicBlock *BB = I->getParent();
4274 for (Instruction &Inst : make_early_inc_range(
4275 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4276 std::next(I->getReverseIterator())))) {
4277 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4278 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4279 MadeIRChange = true;
4280 }
4281 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4282 continue;
4283 // RemoveDIs: erase debug-info on this instruction manually.
4284 Inst.dropDbgRecords();
4285 Inst.eraseFromParent();
4286 MadeIRChange = true;
4287 }
4288
4289 SmallVector<Value *> Changed;
4290 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
4291 MadeIRChange = true;
4292 for (Value *V : Changed)
4293 addToWorklist(cast<Instruction>(V));
4294 }
4295
4296 // Handle potentially dead successors.
4297 for (BasicBlock *Succ : successors(BB))
4298 addDeadEdge(BB, Succ, Worklist);
4299}
4300
4301void InstCombinerImpl::handlePotentiallyDeadBlocks(
4302 SmallVectorImpl<BasicBlock *> &Worklist) {
4303 while (!Worklist.empty()) {
4304 BasicBlock *BB = Worklist.pop_back_val();
4305 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4306 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4307 }))
4308 continue;
4309
4310 handleUnreachableFrom(&BB->front(), Worklist);
4311 }
4312}
4313
4314void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4315 BasicBlock *LiveSucc) {
4316 SmallVector<BasicBlock *> Worklist;
4317 for (BasicBlock *Succ : successors(BB)) {
4318 // The live successor isn't dead.
4319 if (Succ == LiveSucc)
4320 continue;
4321
4322 addDeadEdge(BB, Succ, Worklist);
4323 }
4324
4325 handlePotentiallyDeadBlocks(Worklist);
4326}
4327
4328Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4329 if (BI.isUnconditional())
4330 return visitUnconditionalBranchInst(BI);
4331
4332 // Change br (not X), label True, label False to: br X, label False, True
4333 Value *Cond = BI.getCondition();
4334 Value *X;
4335 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4336 // Swap Destinations and condition...
4337 BI.swapSuccessors();
4338 if (BPI)
4339 BPI->swapSuccEdgesProbabilities(BI.getParent());
4340 return replaceOperand(BI, 0, X);
4341 }
4342
4343 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4344 // This is done by inverting the condition and swapping successors:
4345 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4346 Value *Y;
4347 if (isa<SelectInst>(Cond) &&
4348 match(Cond,
4349 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
4350 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4351 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4352
4353 // Set weights for the new OR select instruction too.
4354 if (!ProfcheckDisableMetadataFixes) {
4355 if (auto *OrInst = dyn_cast<Instruction>(Or)) {
4356 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
4357 SmallVector<uint32_t> Weights;
4358 if (extractBranchWeights(*CondInst, Weights)) {
4359 assert(Weights.size() == 2 &&
4360 "Unexpected number of branch weights!");
4361 std::swap(Weights[0], Weights[1]);
4362 setBranchWeights(*OrInst, Weights, /*IsExpected=*/false);
4363 }
4364 }
4365 }
4366 }
4367 BI.swapSuccessors();
4368 if (BPI)
4369 BPI->swapSuccEdgesProbabilities(BI.getParent());
4370 return replaceOperand(BI, 0, Or);
4371 }
4372
4373 // If the condition is irrelevant, remove the use so that other
4374 // transforms on the condition become more effective.
4375 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4376 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4377
4378 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4379 CmpPredicate Pred;
4380 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4381 !isCanonicalPredicate(Pred)) {
4382 // Swap destinations and condition.
4383 auto *Cmp = cast<CmpInst>(Cond);
4384 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4385 BI.swapSuccessors();
4386 if (BPI)
4387 BPI->swapSuccEdgesProbabilities(BI.getParent());
4388 Worklist.push(Cmp);
4389 return &BI;
4390 }
4391
4392 if (isa<UndefValue>(Cond)) {
4393 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4394 return nullptr;
4395 }
4396 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4397 handlePotentiallyDeadSuccessors(BI.getParent(),
4398 BI.getSuccessor(!CI->getZExtValue()));
4399 return nullptr;
4400 }
4401
4402 // Replace all dominated uses of the condition with true/false
4403 // Ignore constant expressions to avoid iterating over uses on other
4404 // functions.
4405 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4406 for (auto &U : make_early_inc_range(Cond->uses())) {
4407 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4408 if (DT.dominates(Edge0, U)) {
4409 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4410 addToWorklist(cast<Instruction>(U.getUser()));
4411 continue;
4412 }
4413 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4414 if (DT.dominates(Edge1, U)) {
4415 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4416 addToWorklist(cast<Instruction>(U.getUser()));
4417 }
4418 }
4419 }
4420
4421 DC.registerBranch(&BI);
4422 return nullptr;
4423}
4424
4425// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4426// we can prove that both (switch C) and (switch X) go to the default when cond
4427// is false/true.
4428static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
4429 SelectInst *Select,
4430 bool IsTrueArm) {
4431 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4432 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4433 if (!C)
4434 return nullptr;
4435
4436 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4437 if (CstBB != SI.getDefaultDest())
4438 return nullptr;
4439 Value *X = Select->getOperand(3 - CstOpIdx);
4440 CmpPredicate Pred;
4441 const APInt *RHSC;
4442 if (!match(Select->getCondition(),
4443 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4444 return nullptr;
4445 if (IsTrueArm)
4446 Pred = ICmpInst::getInversePredicate(Pred);
4447
4448 // See whether we can replace the select with X
4449 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
4450 for (auto Case : SI.cases())
4451 if (!CR.contains(Case.getCaseValue()->getValue()))
4452 return nullptr;
4453
4454 return X;
4455}
4456
4457Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4458 Value *Cond = SI.getCondition();
4459 Value *Op0;
4460 const APInt *CondOpC;
4461 using InvertFn = std::function<APInt(const APInt &Case, const APInt &C)>;
4462
4463 auto MaybeInvertible = [&](Value *Cond) -> InvertFn {
4464 if (match(Cond, m_Add(m_Value(Op0), m_APInt(CondOpC))))
4465 // Change 'switch (X+C) case Case:' into 'switch (X) case Case-C'.
4466 return [](const APInt &Case, const APInt &C) { return Case - C; };
4467
4468 if (match(Cond, m_Sub(m_APInt(CondOpC), m_Value(Op0))))
4469 // Change 'switch (C-X) case Case:' into 'switch (X) case C-Case'.
4470 return [](const APInt &Case, const APInt &C) { return C - Case; };
4471
4472 if (match(Cond, m_Xor(m_Value(Op0), m_APInt(CondOpC))) &&
4473 !CondOpC->isMinSignedValue() && !CondOpC->isMaxSignedValue())
4474 // Change 'switch (X^C) case Case:' into 'switch (X) case Case^C'.
4475 // Prevent creation of large case values by excluding extremes.
4476 return [](const APInt &Case, const APInt &C) { return Case ^ C; };
4477
4478 return nullptr;
4479 };
4480
4481 // Attempt to invert and simplify the switch condition, as long as the
4482 // condition is not used further, as it may not be profitable otherwise.
4483 if (auto InvertFn = MaybeInvertible(Cond); InvertFn && Cond->hasOneUse()) {
4484 for (auto &Case : SI.cases()) {
4485 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4486 Case.setValue(ConstantInt::get(SI.getContext(), New));
4487 }
4488 return replaceOperand(SI, 0, Op0);
4489 }
4490
4491 uint64_t ShiftAmt;
4492 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4493 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4494 all_of(SI.cases(), [&](const auto &Case) {
4495 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4496 })) {
4497 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
4498 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4499 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4500 Shl->hasOneUse()) {
4501 Value *NewCond = Op0;
4502 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4503 // If the shift may wrap, we need to mask off the shifted bits.
4504 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4505 NewCond = Builder.CreateAnd(
4506 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4507 }
4508 for (auto Case : SI.cases()) {
4509 const APInt &CaseVal = Case.getCaseValue()->getValue();
4510 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4511 : CaseVal.lshr(ShiftAmt);
4512 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4513 }
4514 return replaceOperand(SI, 0, NewCond);
4515 }
4516 }
4517
4518 // Fold switch(zext/sext(X)) into switch(X) if possible.
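// E.g. (illustrative): all case values fit in the narrow type, so
//   %c = zext i8 %x to i32
//   switch i32 %c [ i32 1, ...; i32 200, ... ]
// becomes
//   switch i8 %x [ i8 1, ...; i8 200, ... ]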
4519 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4520 bool IsZExt = isa<ZExtInst>(Cond);
4521 Type *SrcTy = Op0->getType();
4522 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4523
4524 if (all_of(SI.cases(), [&](const auto &Case) {
4525 const APInt &CaseVal = Case.getCaseValue()->getValue();
4526 return IsZExt ? CaseVal.isIntN(NewWidth)
4527 : CaseVal.isSignedIntN(NewWidth);
4528 })) {
4529 for (auto &Case : SI.cases()) {
4530 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4531 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4532 }
4533 return replaceOperand(SI, 0, Op0);
4534 }
4535 }
4536
4537 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4538 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4539 if (Value *V =
4540 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4541 return replaceOperand(SI, 0, V);
4542 if (Value *V =
4543 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4544 return replaceOperand(SI, 0, V);
4545 }
4546
4547 KnownBits Known = computeKnownBits(Cond, &SI);
4548 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4549 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4550
4551 // Compute the number of leading bits we can ignore.
4552 // TODO: A better way to determine this would use ComputeNumSignBits().
4553 for (const auto &C : SI.cases()) {
4554 LeadingKnownZeros =
4555 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4556 LeadingKnownOnes =
4557 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4558 }
4559
4560 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4561
4562 // Shrink the condition operand if the new type is smaller than the old type.
4563 // But do not shrink to a non-standard type, because backend can't generate
4564 // good code for that yet.
4565 // TODO: We can make it aggressive again after fixing PR39569.
4566 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4567 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4568 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4569 Builder.SetInsertPoint(&SI);
4570 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4571
4572 for (auto Case : SI.cases()) {
4573 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4574 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4575 }
4576 return replaceOperand(SI, 0, NewCond);
4577 }
4578
4579 if (isa<UndefValue>(Cond)) {
4580 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4581 return nullptr;
4582 }
4583 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4584 handlePotentiallyDeadSuccessors(SI.getParent(),
4585 SI.findCaseValue(CI)->getCaseSuccessor());
4586 return nullptr;
4587 }
4588
4589 return nullptr;
4590}
4591
4592Instruction *
4593InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4594 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4595 if (!WO)
4596 return nullptr;
4597
4598 Intrinsic::ID OvID = WO->getIntrinsicID();
4599 const APInt *C = nullptr;
4600 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4601 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4602 OvID == Intrinsic::umul_with_overflow)) {
4603 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4604 if (C->isAllOnes())
4605 return BinaryOperator::CreateNeg(WO->getLHS());
4606 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4607 if (C->isPowerOf2()) {
4608 return BinaryOperator::CreateShl(
4609 WO->getLHS(),
4610 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4611 }
4612 }
4613 }
4614
4615 // We're extracting from an overflow intrinsic. See if we're the only user.
4616 // That allows us to simplify multiple result intrinsics to simpler things
4617 // that just get one value.
4618 if (!WO->hasOneUse())
4619 return nullptr;
4620
4621 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4622 // and replace it with a traditional binary instruction.
4623 if (*EV.idx_begin() == 0) {
4624 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4625 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4626 // Replace the old instruction's uses with poison.
4627 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4628 eraseInstFromFunction(*WO);
4629 return BinaryOperator::Create(BinOp, LHS, RHS);
4630 }
4631
4632 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4633
4634 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4635 if (OvID == Intrinsic::usub_with_overflow)
4636 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4637
4638 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4639 // +1 is not possible because we assume signed values.
4640 if (OvID == Intrinsic::smul_with_overflow &&
4641 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4642 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4643
4644 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4645 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4646 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4647 // Only handle even bitwidths for performance reasons.
4648 if (BitWidth % 2 == 0)
4649 return new ICmpInst(
4650 ICmpInst::ICMP_UGT, WO->getLHS(),
4651 ConstantInt::get(WO->getLHS()->getType(),
4652 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4653 }
4654
4655 // If only the overflow result is used, and the right hand side is a
4656 // constant (or constant splat), we can remove the intrinsic by directly
4657 // checking for overflow.
4658 if (C) {
4659 // Compute the no-wrap range for LHS given RHS=C, then construct an
4660 // equivalent icmp, potentially using an offset.
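// E.g. (illustrative): for i8 uadd.with.overflow(%x, 42), %x avoids
// unsigned wrap iff it is in [0, 213], so the overflow bit simplifies to
//   icmp ugt i8 %x, 213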
4661 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4662 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4663
4664 CmpInst::Predicate Pred;
4665 APInt NewRHSC, Offset;
4666 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4667 auto *OpTy = WO->getRHS()->getType();
4668 auto *NewLHS = WO->getLHS();
4669 if (Offset != 0)
4670 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4671 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4672 ConstantInt::get(OpTy, NewRHSC));
4673 }
4674
4675 return nullptr;
4676}
4677
4678static Value *foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall,
4679 SelectInst *SelectInst,
4680 InstCombiner::BuilderTy &Builder) {
4681 // Helper to fold frexp of select to select of frexp.
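// E.g. (illustrative):
//   %s = select i1 %c, float 4.0, float %x
//   %f = call { float, i32 } @llvm.frexp.f32.i32(float %s)
//   %m = extractvalue { float, i32 } %f, 0
// becomes
//   %f2 = call { float, i32 } @llvm.frexp.f32.i32(float %x)
//   %mx = extractvalue { float, i32 } %f2, 0
//   %m  = select i1 %c, float 0.5, float %mx   ; frexp(4.0) = 0.5 * 2^3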
4682
4683 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4684 return nullptr;
4685 Value *Cond = SelectInst->getCondition();
4686 Value *TrueVal = SelectInst->getTrueValue();
4687 Value *FalseVal = SelectInst->getFalseValue();
4688
4689 const APFloat *ConstVal = nullptr;
4690 Value *VarOp = nullptr;
4691 bool ConstIsTrue = false;
4692
4693 if (match(TrueVal, m_APFloat(ConstVal))) {
4694 VarOp = FalseVal;
4695 ConstIsTrue = true;
4696 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4697 VarOp = TrueVal;
4698 ConstIsTrue = false;
4699 } else {
4700 return nullptr;
4701 }
4702
4703 Builder.SetInsertPoint(&EV);
4704
4705 CallInst *NewFrexp =
4706 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4707 NewFrexp->copyIRFlags(FrexpCall);
4708
4709 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4710
4711 int Exp;
4712 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4713
4714 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4715
4716 Value *NewSel = Builder.CreateSelectFMF(
4717 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4718 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4719 return NewSel;
4720}
4721Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4722 Value *Agg = EV.getAggregateOperand();
4723
4724 if (!EV.hasIndices())
4725 return replaceInstUsesWith(EV, Agg);
4726
4727 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4728 SQ.getWithInstruction(&EV)))
4729 return replaceInstUsesWith(EV, V);
4730
4731 Value *Cond, *TrueVal, *FalseVal;
4732 if (match(Agg, m_OneUse(m_Intrinsic<Intrinsic::frexp>(m_Select(
4733 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4734 auto *SelInst =
4735 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4736 if (Value *Result =
4737 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4738 return replaceInstUsesWith(EV, Result);
4739 }
4740 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4741 // We're extracting from an insertvalue instruction, compare the indices
4742 const unsigned *exti, *exte, *insi, *inse;
4743 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4744 exte = EV.idx_end(), inse = IV->idx_end();
4745 exti != exte && insi != inse;
4746 ++exti, ++insi) {
4747 if (*insi != *exti)
4748 // The insert and extract both reference distinctly different elements.
4749 // This means the extract is not influenced by the insert, and we can
4750 // replace the aggregate operand of the extract with the aggregate
4751 // operand of the insert. i.e., replace
4752 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4753 // %E = extractvalue { i32, { i32 } } %I, 0
4754 // with
4755 // %E = extractvalue { i32, { i32 } } %A, 0
4756 return ExtractValueInst::Create(IV->getAggregateOperand(),
4757 EV.getIndices());
4758 }
4759 if (exti == exte && insi == inse)
4760 // Both iterators are at the end: Index lists are identical. Replace
4761 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4762 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4763 // with "i32 42"
4764 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4765 if (exti == exte) {
4766 // The extract list is a prefix of the insert list. i.e. replace
4767 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4768 // %E = extractvalue { i32, { i32 } } %I, 1
4769 // with
4770 // %X = extractvalue { i32, { i32 } } %A, 1
4771 // %E = insertvalue { i32 } %X, i32 42, 0
4772 // by switching the order of the insert and extract (though the
4773 // insertvalue should be left in, since it may have other uses).
4774 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4775 EV.getIndices());
4776 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4777 ArrayRef(insi, inse));
4778 }
4779 if (insi == inse)
4780 // The insert list is a prefix of the extract list
4781 // We can simply remove the common indices from the extract and make it
4782 // operate on the inserted value instead of the insertvalue result.
4783 // i.e., replace
4784 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4785 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4786 // with
4787 // %E extractvalue { i32 } { i32 42 }, 0
4788 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4789 ArrayRef(exti, exte));
4790 }
4791
4792 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4793 return R;
4794
4795 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4796 // Bail out if the aggregate contains scalable vector type
4797 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4798 STy && STy->isScalableTy())
4799 return nullptr;
4800
4801 // If the (non-volatile) load only has one use, we can rewrite this to a
4802 // load from a GEP. This reduces the size of the load. If a load is used
4803 // only by extractvalue instructions then this either must have been
4804 // optimized before, or it is a struct with padding, in which case we
4805 // don't want to do the transformation as it loses padding knowledge.
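// E.g. (illustrative):
//   %agg = load { i32, i64 }, ptr %p
//   %v   = extractvalue { i32, i64 } %agg, 1
// becomes
//   %gep = getelementptr inbounds { i32, i64 }, ptr %p, i32 0, i32 1
//   %v   = load i64, ptr %gep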
4806 if (L->isSimple() && L->hasOneUse()) {
4807 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4808 SmallVector<Value*, 4> Indices;
4809 // Prefix an i32 0 since we need the first element.
4810 Indices.push_back(Builder.getInt32(0));
4811 for (unsigned Idx : EV.indices())
4812 Indices.push_back(Builder.getInt32(Idx));
4813
4814 // We need to insert these at the location of the old load, not at that of
4815 // the extractvalue.
4816 Builder.SetInsertPoint(L);
4817 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4818 L->getPointerOperand(), Indices);
4819 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4820 // Whatever aliasing information we had for the original load must also
4821 // hold for the smaller load, so propagate the annotations.
4822 NL->setAAMetadata(L->getAAMetadata());
4823 // Returning the load directly will cause the main loop to insert it in
4824 // the wrong spot, so use replaceInstUsesWith().
4825 return replaceInstUsesWith(EV, NL);
4826 }
4827 }
4828
4829 if (auto *PN = dyn_cast<PHINode>(Agg))
4830 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4831 return Res;
4832
4833 // Canonicalize extract (select Cond, TV, FV)
4834 // -> select cond, (extract TV), (extract FV)
4835 if (auto *SI = dyn_cast<SelectInst>(Agg))
4836 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4837 return R;
4838
4839 // We could simplify extracts from other values. Note that nested extracts may
4840 // already be simplified implicitly by the above: extract (extract (insert) )
4841 // will be translated into extract ( insert ( extract ) ) first and then just
4842 // the value inserted, if appropriate. Similarly for extracts from single-use
4843 // loads: extract (extract (load)) will be translated to extract (load (gep))
4844 // and if again single-use then via load (gep (gep)) to load (gep).
4845 // However, double extracts from e.g. function arguments or return values
4846 // aren't handled yet.
4847 return nullptr;
4848}
4849
4850/// Return 'true' if the given typeinfo will match anything.
4851static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4852 switch (Personality) {
 4856 // The GCC C EH and Rust personalities only exist to support cleanups, so
4857 // it's not clear what the semantics of catch clauses are.
4858 return false;
4860 return false;
4862 // While __gnat_all_others_value will match any Ada exception, it doesn't
4863 // match foreign exceptions (or didn't, before gcc-4.7).
4864 return false;
4875 return TypeInfo->isNullValue();
4876 }
4877 llvm_unreachable("invalid enum");
4878}
4879
4880static bool shorter_filter(const Value *LHS, const Value *RHS) {
4881 return
4882 cast<ArrayType>(LHS->getType())->getNumElements()
4883 <
4884 cast<ArrayType>(RHS->getType())->getNumElements();
4885}
4886
 4887 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4888 // The logic here should be correct for any real-world personality function.
4889 // However if that turns out not to be true, the offending logic can always
4890 // be conditioned on the personality function, like the catch-all logic is.
4891 EHPersonality Personality =
4892 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4893
 4894 // Simplify the list of clauses, e.g. by removing repeated catch clauses
4895 // (these are often created by inlining).
4896 bool MakeNewInstruction = false; // If true, recreate using the following:
4897 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4898 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4899
4900 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4901 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4902 bool isLastClause = i + 1 == e;
4903 if (LI.isCatch(i)) {
4904 // A catch clause.
4905 Constant *CatchClause = LI.getClause(i);
4906 Constant *TypeInfo = CatchClause->stripPointerCasts();
4907
4908 // If we already saw this clause, there is no point in having a second
4909 // copy of it.
4910 if (AlreadyCaught.insert(TypeInfo).second) {
4911 // This catch clause was not already seen.
4912 NewClauses.push_back(CatchClause);
4913 } else {
4914 // Repeated catch clause - drop the redundant copy.
4915 MakeNewInstruction = true;
4916 }
4917
4918 // If this is a catch-all then there is no point in keeping any following
4919 // clauses or marking the landingpad as having a cleanup.
4920 if (isCatchAll(Personality, TypeInfo)) {
4921 if (!isLastClause)
4922 MakeNewInstruction = true;
4923 CleanupFlag = false;
4924 break;
4925 }
4926 } else {
4927 // A filter clause. If any of the filter elements were already caught
4928 // then they can be dropped from the filter. It is tempting to try to
4929 // exploit the filter further by saying that any typeinfo that does not
4930 // occur in the filter can't be caught later (and thus can be dropped).
4931 // However this would be wrong, since typeinfos can match without being
4932 // equal (for example if one represents a C++ class, and the other some
4933 // class derived from it).
4934 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4935 Constant *FilterClause = LI.getClause(i);
4936 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4937 unsigned NumTypeInfos = FilterType->getNumElements();
4938
4939 // An empty filter catches everything, so there is no point in keeping any
4940 // following clauses or marking the landingpad as having a cleanup. By
4941 // dealing with this case here the following code is made a bit simpler.
4942 if (!NumTypeInfos) {
4943 NewClauses.push_back(FilterClause);
4944 if (!isLastClause)
4945 MakeNewInstruction = true;
4946 CleanupFlag = false;
4947 break;
4948 }
4949
4950 bool MakeNewFilter = false; // If true, make a new filter.
4951 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4952 if (isa<ConstantAggregateZero>(FilterClause)) {
4953 // Not an empty filter - it contains at least one null typeinfo.
4954 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4955 Constant *TypeInfo =
 4956 Constant::getNullValue(FilterType->getElementType());
4957 // If this typeinfo is a catch-all then the filter can never match.
4958 if (isCatchAll(Personality, TypeInfo)) {
4959 // Throw the filter away.
4960 MakeNewInstruction = true;
4961 continue;
4962 }
4963
4964 // There is no point in having multiple copies of this typeinfo, so
4965 // discard all but the first copy if there is more than one.
4966 NewFilterElts.push_back(TypeInfo);
4967 if (NumTypeInfos > 1)
4968 MakeNewFilter = true;
4969 } else {
4970 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4971 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4972 NewFilterElts.reserve(NumTypeInfos);
4973
4974 // Remove any filter elements that were already caught or that already
4975 // occurred in the filter. While there, see if any of the elements are
4976 // catch-alls. If so, the filter can be discarded.
4977 bool SawCatchAll = false;
4978 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4979 Constant *Elt = Filter->getOperand(j);
4980 Constant *TypeInfo = Elt->stripPointerCasts();
4981 if (isCatchAll(Personality, TypeInfo)) {
4982 // This element is a catch-all. Bail out, noting this fact.
4983 SawCatchAll = true;
4984 break;
4985 }
4986
4987 // Even if we've seen a type in a catch clause, we don't want to
4988 // remove it from the filter. An unexpected type handler may be
4989 // set up for a call site which throws an exception of the same
4990 // type caught. In order for the exception thrown by the unexpected
4991 // handler to propagate correctly, the filter must be correctly
4992 // described for the call site.
4993 //
4994 // Example:
4995 //
4996 // void unexpected() { throw 1;}
4997 // void foo() throw (int) {
4998 // std::set_unexpected(unexpected);
4999 // try {
5000 // throw 2.0;
5001 // } catch (int i) {}
5002 // }
5003
5004 // There is no point in having multiple copies of the same typeinfo in
5005 // a filter, so only add it if we didn't already.
5006 if (SeenInFilter.insert(TypeInfo).second)
5007 NewFilterElts.push_back(cast<Constant>(Elt));
5008 }
5009 // A filter containing a catch-all cannot match anything by definition.
5010 if (SawCatchAll) {
5011 // Throw the filter away.
5012 MakeNewInstruction = true;
5013 continue;
5014 }
5015
5016 // If we dropped something from the filter, make a new one.
5017 if (NewFilterElts.size() < NumTypeInfos)
5018 MakeNewFilter = true;
5019 }
5020 if (MakeNewFilter) {
5021 FilterType = ArrayType::get(FilterType->getElementType(),
5022 NewFilterElts.size());
5023 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
5024 MakeNewInstruction = true;
5025 }
5026
5027 NewClauses.push_back(FilterClause);
5028
5029 // If the new filter is empty then it will catch everything so there is
5030 // no point in keeping any following clauses or marking the landingpad
5031 // as having a cleanup. The case of the original filter being empty was
5032 // already handled above.
5033 if (MakeNewFilter && !NewFilterElts.size()) {
5034 assert(MakeNewInstruction && "New filter but not a new instruction!");
5035 CleanupFlag = false;
5036 break;
5037 }
5038 }
5039 }
5040
5041 // If several filters occur in a row then reorder them so that the shortest
5042 // filters come first (those with the smallest number of elements). This is
5043 // advantageous because shorter filters are more likely to match, speeding up
5044 // unwinding, but mostly because it increases the effectiveness of the other
5045 // filter optimizations below.
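 // E.g. given adjacent filters [A, B] followed by [A], sorting yields
 // [A], [A, B]; the subset rule below then notices that every element of [A]
 // occurs in [A, B] and deletes the longer filter outright.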
5046 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
5047 unsigned j;
5048 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
5049 for (j = i; j != e; ++j)
5050 if (!isa<ArrayType>(NewClauses[j]->getType()))
5051 break;
5052
5053 // Check whether the filters are already sorted by length. We need to know
5054 // if sorting them is actually going to do anything so that we only make a
5055 // new landingpad instruction if it does.
5056 for (unsigned k = i; k + 1 < j; ++k)
5057 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
5058 // Not sorted, so sort the filters now. Doing an unstable sort would be
5059 // correct too but reordering filters pointlessly might confuse users.
5060 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
 5061 shorter_filter);
5062 MakeNewInstruction = true;
5063 break;
5064 }
5065
5066 // Look for the next batch of filters.
5067 i = j + 1;
5068 }
5069
5070 // If typeinfos matched if and only if equal, then the elements of a filter L
5071 // that occurs later than a filter F could be replaced by the intersection of
5072 // the elements of F and L. In reality two typeinfos can match without being
5073 // equal (for example if one represents a C++ class, and the other some class
5074 // derived from it) so it would be wrong to perform this transform in general.
5075 // However the transform is correct and useful if F is a subset of L. In that
5076 // case L can be replaced by F, and thus removed altogether since repeating a
5077 // filter is pointless. So here we look at all pairs of filters F and L where
5078 // L follows F in the list of clauses, and remove L if every element of F is
5079 // an element of L. This can occur when inlining C++ functions with exception
5080 // specifications.
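 // E.g. after inlining, the clause list may contain "filter [int]" followed
 // by "filter [int, double]"; since [int] is a subset of [int, double], the
 // later filter is redundant and is erased below.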
5081 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
5082 // Examine each filter in turn.
5083 Value *Filter = NewClauses[i];
5084 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
5085 if (!FTy)
5086 // Not a filter - skip it.
5087 continue;
5088 unsigned FElts = FTy->getNumElements();
5089 // Examine each filter following this one. Doing this backwards means that
5090 // we don't have to worry about filters disappearing under us when removed.
5091 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
5092 Value *LFilter = NewClauses[j];
5093 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
5094 if (!LTy)
5095 // Not a filter - skip it.
5096 continue;
5097 // If Filter is a subset of LFilter, i.e. every element of Filter is also
5098 // an element of LFilter, then discard LFilter.
5099 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
5100 // If Filter is empty then it is a subset of LFilter.
5101 if (!FElts) {
5102 // Discard LFilter.
5103 NewClauses.erase(J);
5104 MakeNewInstruction = true;
5105 // Move on to the next filter.
5106 continue;
5107 }
5108 unsigned LElts = LTy->getNumElements();
5109 // If Filter is longer than LFilter then it cannot be a subset of it.
5110 if (FElts > LElts)
5111 // Move on to the next filter.
5112 continue;
5113 // At this point we know that LFilter has at least one element.
5114 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
5115 // Filter is a subset of LFilter iff Filter contains only zeros (as we
5116 // already know that Filter is not longer than LFilter).
 5117 if (isa<ConstantAggregateZero>(Filter)) {
5118 assert(FElts <= LElts && "Should have handled this case earlier!");
5119 // Discard LFilter.
5120 NewClauses.erase(J);
5121 MakeNewInstruction = true;
5122 }
5123 // Move on to the next filter.
5124 continue;
5125 }
5126 ConstantArray *LArray = cast<ConstantArray>(LFilter);
5127 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
5128 // Since Filter is non-empty and contains only zeros, it is a subset of
5129 // LFilter iff LFilter contains a zero.
5130 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
5131 for (unsigned l = 0; l != LElts; ++l)
5132 if (LArray->getOperand(l)->isNullValue()) {
5133 // LFilter contains a zero - discard it.
5134 NewClauses.erase(J);
5135 MakeNewInstruction = true;
5136 break;
5137 }
5138 // Move on to the next filter.
5139 continue;
5140 }
5141 // At this point we know that both filters are ConstantArrays. Loop over
5142 // operands to see whether every element of Filter is also an element of
5143 // LFilter. Since filters tend to be short this is probably faster than
5144 // using a method that scales nicely.
 5145 ConstantArray *FArray = cast<ConstantArray>(Filter);
5146 bool AllFound = true;
5147 for (unsigned f = 0; f != FElts; ++f) {
5148 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
5149 AllFound = false;
5150 for (unsigned l = 0; l != LElts; ++l) {
5151 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
5152 if (LTypeInfo == FTypeInfo) {
5153 AllFound = true;
5154 break;
5155 }
5156 }
5157 if (!AllFound)
5158 break;
5159 }
5160 if (AllFound) {
5161 // Discard LFilter.
5162 NewClauses.erase(J);
5163 MakeNewInstruction = true;
5164 }
5165 // Move on to the next filter.
5166 }
5167 }
5168
5169 // If we changed any of the clauses, replace the old landingpad instruction
5170 // with a new one.
5171 if (MakeNewInstruction) {
 5172 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
5173 NewClauses.size());
5174 for (Constant *C : NewClauses)
5175 NLI->addClause(C);
5176 // A landing pad with no clauses must have the cleanup flag set. It is
5177 // theoretically possible, though highly unlikely, that we eliminated all
5178 // clauses. If so, force the cleanup flag to true.
5179 if (NewClauses.empty())
5180 CleanupFlag = true;
5181 NLI->setCleanup(CleanupFlag);
5182 return NLI;
5183 }
5184
5185 // Even if none of the clauses changed, we may nonetheless have understood
5186 // that the cleanup flag is pointless. Clear it if so.
5187 if (LI.isCleanup() != CleanupFlag) {
5188 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5189 LI.setCleanup(CleanupFlag);
5190 return &LI;
5191 }
5192
5193 return nullptr;
5194}
5195
 5196 Value *
 5197 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
5198 // Try to push freeze through instructions that propagate but don't produce
5199 // poison as far as possible. If an operand of freeze follows three
5200 // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
5201 // guaranteed-non-poison operands then push the freeze through to the one
5202 // operand that is not guaranteed non-poison. The actual transform is as
5203 // follows.
 5204 // Op1 = ... ; Op1 can be poison
 5205 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and Op1 is its
 5206 // ; only maybe-poison operand
5207 // ... = Freeze(Op0)
5208 // =>
5209 // Op1 = ...
5210 // Op1.fr = Freeze(Op1)
5211 // ... = Inst(Op1.fr, NonPoisonOps...)
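 // Concretely: for "%op = shl i32 %x, 2" followed by "%fr = freeze i32 %op",
 // the shift amount is a non-poison constant, so this produces
 // "%x.fr = freeze i32 %x" and "%op = shl i32 %x.fr, 2", and the original
 // freeze is replaced by %op.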
5212 auto *OrigOp = OrigFI.getOperand(0);
5213 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
5214
5215 // While we could change the other users of OrigOp to use freeze(OrigOp), that
 5216 // could reduce their optimization potential, so we only do this if
5217 // the OrigOp is only used by the freeze.
5218 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
5219 return nullptr;
5220
5221 // We can't push the freeze through an instruction which can itself create
5222 // poison. If the only source of new poison is flags, we can simply
5223 // strip them (since we know the only use is the freeze and nothing can
5224 // benefit from them.)
 5225 if (canCreateUndefOrPoison(cast<Operator>(OrigOpInst),
5226 /*ConsiderFlagsAndMetadata*/ false))
5227 return nullptr;
5228
5229 // If operand is guaranteed not to be poison, there is no need to add freeze
5230 // to the operand. So we first find the operand that is not guaranteed to be
5231 // poison.
5232 Value *MaybePoisonOperand = nullptr;
5233 for (Value *V : OrigOpInst->operands()) {
 5234 if (isGuaranteedNotToBeUndefOrPoison(V) ||
5235 // Treat identical operands as a single operand.
5236 (MaybePoisonOperand && MaybePoisonOperand == V))
5237 continue;
5238 if (!MaybePoisonOperand)
5239 MaybePoisonOperand = V;
5240 else
5241 return nullptr;
5242 }
5243
5244 OrigOpInst->dropPoisonGeneratingAnnotations();
5245
5246 // If all operands are guaranteed to be non-poison, we can drop freeze.
5247 if (!MaybePoisonOperand)
5248 return OrigOp;
5249
5250 Builder.SetInsertPoint(OrigOpInst);
5251 Value *FrozenMaybePoisonOperand = Builder.CreateFreeze(
5252 MaybePoisonOperand, MaybePoisonOperand->getName() + ".fr");
5253
5254 OrigOpInst->replaceUsesOfWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
5255 return OrigOp;
5256}
5257
 5258 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5259 PHINode *PN) {
5260 // Detect whether this is a recurrence with a start value and some number of
5261 // backedge values. We'll check whether we can push the freeze through the
5262 // backedge values (possibly dropping poison flags along the way) until we
5263 // reach the phi again. In that case, we can move the freeze to the start
5264 // value.
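 // E.g. with "%iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]" and
 // "%iv.next = add nuw i32 %iv, 1", a freeze of %iv can instead freeze
 // %start, once the poison-generating nuw flag is dropped from the backedge
 // add.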
5265 Use *StartU = nullptr;
 5266 SmallVector<Value *> Worklist;
5267 for (Use &U : PN->incoming_values()) {
5268 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5269 // Add backedge value to worklist.
5270 Worklist.push_back(U.get());
5271 continue;
5272 }
5273
5274 // Don't bother handling multiple start values.
5275 if (StartU)
5276 return nullptr;
5277 StartU = &U;
5278 }
5279
5280 if (!StartU || Worklist.empty())
5281 return nullptr; // Not a recurrence.
5282
5283 Value *StartV = StartU->get();
5284 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5285 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5286 // We can't insert freeze if the start value is the result of the
5287 // terminator (e.g. an invoke).
5288 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5289 return nullptr;
5290
 5291 SmallPtrSet<Value *, 32> Visited;
 5292 SmallVector<Instruction *> DropFlags;
5293 while (!Worklist.empty()) {
5294 Value *V = Worklist.pop_back_val();
5295 if (!Visited.insert(V).second)
5296 continue;
5297
5298 if (Visited.size() > 32)
5299 return nullptr; // Limit the total number of values we inspect.
5300
5301 // Assume that PN is non-poison, because it will be after the transform.
5302 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5303 continue;
5304
 5305 Instruction *I = dyn_cast<Instruction>(V);
 5306 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5307 /*ConsiderFlagsAndMetadata*/ false))
5308 return nullptr;
5309
5310 DropFlags.push_back(I);
5311 append_range(Worklist, I->operands());
5312 }
5313
5314 for (Instruction *I : DropFlags)
5315 I->dropPoisonGeneratingAnnotations();
5316
5317 if (StartNeedsFreeze) {
5318 Builder.SetInsertPoint(StartBB->getTerminator());
5319 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5320 StartV->getName() + ".fr");
5321 replaceUse(*StartU, FrozenStartV);
5322 }
5323 return replaceInstUsesWith(FI, PN);
5324}
5325
 5326 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5327 Value *Op = FI.getOperand(0);
5328
5329 if (isa<Constant>(Op) || Op->hasOneUse())
5330 return false;
5331
5332 // Move the freeze directly after the definition of its operand, so that
5333 // it dominates the maximum number of uses. Note that it may not dominate
5334 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5335 // the normal/default destination. This is why the domination check in the
5336 // replacement below is still necessary.
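 // E.g. when Op is an invoke result, the insertion point after its
 // definition is at the top of the normal destination, which does not
 // dominate a use of Op in a phi of that same block.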
5337 BasicBlock::iterator MoveBefore;
5338 if (isa<Argument>(Op)) {
5339 MoveBefore =
 5340 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5341 } else {
5342 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5343 if (!MoveBeforeOpt)
5344 return false;
5345 MoveBefore = *MoveBeforeOpt;
5346 }
5347
5348 // Re-point iterator to come after any debug-info records.
5349 MoveBefore.setHeadBit(false);
5350
5351 bool Changed = false;
5352 if (&FI != &*MoveBefore) {
5353 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5354 Changed = true;
5355 }
5356
5357 Changed |= Op->replaceUsesWithIf(
5358 &FI, [&](Use &U) -> bool { return DT.dominates(&FI, U); });
5359
5360 return Changed;
5361}
5362
5363// Check if any direct or bitcast user of this value is a shuffle instruction.
 5364 static bool isUsedWithinShuffleVector(Value *V) {
5365 for (auto *U : V->users()) {
 5366 if (isa<ShuffleVectorInst>(U))
5367 return true;
5368 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5369 return true;
5370 }
5371 return false;
5372}
5373
 5374 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5375 Value *Op0 = I.getOperand(0);
5376
5377 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5378 return replaceInstUsesWith(I, V);
5379
5380 // freeze (phi const, x) --> phi const, (freeze x)
5381 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5382 if (Instruction *NV = foldOpIntoPhi(I, PN))
5383 return NV;
5384 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5385 return NV;
5386 }
5387
 5388 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5389 return replaceInstUsesWith(I, NI);
5390
5391 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5392 // - or: pick -1
5393 // - select's condition: if the true value is constant, choose it by making
5394 // the condition true.
5395 // - phi: pick the common constant across operands
5396 // - default: pick 0
5397 //
5398 // Note that this transform is intentionally done here rather than
5399 // via an analysis in InstSimplify or at individual user sites. That is
5400 // because we must produce the same value for all uses of the freeze -
5401 // it's the reason "freeze" exists!
5402 //
5403 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5404 // duplicating logic for binops at least.
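 // E.g. a freeze(undef) whose only user is "or i32 %x, %f" picks -1 so the
 // 'or' simplifies to -1, while one used only as a select condition with a
 // constant true arm picks 'true' so the select folds to that constant.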
5405 auto getUndefReplacement = [&](Type *Ty) {
5406 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5407 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5408 // removed.
5409 Constant *BestValue = nullptr;
5410 for (Value *V : PN.incoming_values()) {
5411 if (match(V, m_Freeze(m_Undef())))
5412 continue;
5413
 5414 Constant *C = dyn_cast<Constant>(V);
5415 if (!C)
5416 return nullptr;
5417
 5418 if (!isGuaranteedNotToBeUndefOrPoison(C))
5419 return nullptr;
5420
5421 if (BestValue && BestValue != C)
5422 return nullptr;
5423
5424 BestValue = C;
5425 }
5426 return BestValue;
5427 };
5428
5429 Value *NullValue = Constant::getNullValue(Ty);
5430 Value *BestValue = nullptr;
5431 for (auto *U : I.users()) {
5432 Value *V = NullValue;
5433 if (match(U, m_Or(m_Value(), m_Value())))
 5434 V = ConstantInt::getAllOnesValue(Ty);
5435 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5436 V = ConstantInt::getTrue(Ty);
5437 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5438 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5439 V = NullValue;
5440 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5441 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5442 V = MaybeV;
5443 }
5444
5445 if (!BestValue)
5446 BestValue = V;
5447 else if (BestValue != V)
5448 BestValue = NullValue;
5449 }
5450 assert(BestValue && "Must have at least one use");
5451 assert(BestValue != &I && "Cannot replace with itself");
5452 return BestValue;
5453 };
5454
5455 if (match(Op0, m_Undef())) {
5456 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5457 // a shuffle. This may improve codegen for shuffles that allow
5458 // unspecified inputs.
 5459 if (isUsedWithinShuffleVector(&I))
5460 return nullptr;
5461 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5462 }
5463
5464 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5465 Type *Ty = C->getType();
5466 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5467 if (!VTy)
5468 return nullptr;
5469 unsigned NumElts = VTy->getNumElements();
5470 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5471 for (unsigned i = 0; i != NumElts; ++i) {
5472 Constant *EltC = C->getAggregateElement(i);
5473 if (EltC && !match(EltC, m_Undef())) {
5474 BestValue = EltC;
5475 break;
5476 }
5477 }
5478 return Constant::replaceUndefsWith(C, BestValue);
5479 };
5480
5481 Constant *C;
5482 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5483 !C->containsConstantExpression()) {
5484 if (Constant *Repl = getFreezeVectorReplacement(C))
5485 return replaceInstUsesWith(I, Repl);
5486 }
5487
5488 // Replace uses of Op with freeze(Op).
5489 if (freezeOtherUses(I))
5490 return &I;
5491
5492 return nullptr;
5493}
5494
5495/// Check for case where the call writes to an otherwise dead alloca. This
5496/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5497/// helper *only* analyzes the write; doesn't check any other legality aspect.
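/// A typical source of this pattern is C code such as "int unused;
/// getValue(&out, &unused);" where the second out-param is never read again,
/// leaving an alloca whose only write is the call.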
 5498 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
 5499 auto *CB = dyn_cast<CallBase>(I);
5500 if (!CB)
5501 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5502 // to allow reload along used path as described below. Otherwise, this
5503 // is simply a store to a dead allocation which will be removed.
5504 return false;
5505 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5506 if (!Dest)
5507 return false;
5508 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5509 if (!AI)
5510 // TODO: allow malloc?
5511 return false;
5512 // TODO: allow memory access dominated by move point? Note that since AI
5513 // could have a reference to itself captured by the call, we would need to
5514 // account for cycles in doing so.
5515 SmallVector<const User *> AllocaUsers;
 5516 SmallPtrSet<const User *, 4> Visited;
5517 auto pushUsers = [&](const Instruction &I) {
5518 for (const User *U : I.users()) {
5519 if (Visited.insert(U).second)
5520 AllocaUsers.push_back(U);
5521 }
5522 };
5523 pushUsers(*AI);
5524 while (!AllocaUsers.empty()) {
5525 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5526 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5527 pushUsers(*UserI);
5528 continue;
5529 }
5530 if (UserI == CB)
5531 continue;
5532 // TODO: support lifetime.start/end here
5533 return false;
5534 }
5535 return true;
5536}
5537
5538/// Try to move the specified instruction from its current block into the
5539/// beginning of DestBlock, which can only happen if it's safe to move the
5540/// instruction past all of the instructions between it and the end of its
5541/// block.
 5542 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5543 BasicBlock *DestBlock) {
5544 BasicBlock *SrcBlock = I->getParent();
5545
 5546 // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
5547 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5548 I->isTerminator())
5549 return false;
5550
5551 // Do not sink static or dynamic alloca instructions. Static allocas must
5552 // remain in the entry block, and dynamic allocas must not be sunk in between
5553 // a stacksave / stackrestore pair, which would incorrectly shorten its
5554 // lifetime.
5555 if (isa<AllocaInst>(I))
5556 return false;
5557
5558 // Do not sink into catchswitch blocks.
5559 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5560 return false;
5561
5562 // Do not sink convergent call instructions.
5563 if (auto *CI = dyn_cast<CallInst>(I)) {
5564 if (CI->isConvergent())
5565 return false;
5566 }
5567
 5568 // Unless we can prove that the memory write isn't visible except on the
5569 // path we're sinking to, we must bail.
5570 if (I->mayWriteToMemory()) {
5571 if (!SoleWriteToDeadLocal(I, TLI))
5572 return false;
5573 }
5574
5575 // We can only sink load instructions if there is nothing between the load and
5576 // the end of block that could change the value.
5577 if (I->mayReadFromMemory() &&
5578 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5579 // We don't want to do any sophisticated alias analysis, so we only check
5580 // the instructions after I in I's parent block if we try to sink to its
5581 // successor block.
5582 if (DestBlock->getUniquePredecessor() != I->getParent())
5583 return false;
5584 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5585 E = I->getParent()->end();
5586 Scan != E; ++Scan)
5587 if (Scan->mayWriteToMemory())
5588 return false;
5589 }
5590
5591 I->dropDroppableUses([&](const Use *U) {
5592 auto *I = dyn_cast<Instruction>(U->getUser());
5593 if (I && I->getParent() != DestBlock) {
5594 Worklist.add(I);
5595 return true;
5596 }
5597 return false;
5598 });
5599 /// FIXME: We could remove droppable uses that are not dominated by
5600 /// the new position.
5601
5602 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5603 I->moveBefore(*DestBlock, InsertPos);
5604 ++NumSunkInst;
5605
5606 // Also sink all related debug uses from the source basic block. Otherwise we
5607 // get debug use before the def. Attempt to salvage debug uses first, to
 5608 // maximise the range over which variables have a location. If we cannot salvage, then
5609 // mark the location undef: we know it was supposed to receive a new location
5610 // here, but that computation has been sunk.
5611 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5612 findDbgUsers(I, DbgVariableRecords);
5613 if (!DbgVariableRecords.empty())
5614 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5615 DbgVariableRecords);
5616
5617 // PS: there are numerous flaws with this behaviour, not least that right now
5618 // assignments can be re-ordered past other assignments to the same variable
 5619 // if they use different Values. Creating more undef assignments can never be
 5620 // undone. And salvaging all users outside of this block can unnecessarily
5621 // alter the lifetime of the live-value that the variable refers to.
5622 // Some of these things can be resolved by tolerating debug use-before-defs in
5623 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5624 // being used for more architectures.
5625
5626 return true;
5627}
5628
 5629 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5630 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5631 BasicBlock *DestBlock,
5632 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5633 // For all debug values in the destination block, the sunk instruction
5634 // will still be available, so they do not need to be dropped.
5635
5636 // Fetch all DbgVariableRecords not already in the destination.
5637 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5638 for (auto &DVR : DbgVariableRecords)
5639 if (DVR->getParent() != DestBlock)
5640 DbgVariableRecordsToSalvage.push_back(DVR);
5641
5642 // Fetch a second collection, of DbgVariableRecords in the source block that
5643 // we're going to sink.
5644 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5645 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5646 if (DVR->getParent() == SrcBlock)
5647 DbgVariableRecordsToSink.push_back(DVR);
5648
5649 // Sort DbgVariableRecords according to their position in the block. This is a
5650 // partial order: DbgVariableRecords attached to different instructions will
5651 // be ordered by the instruction order, but DbgVariableRecords attached to the
5652 // same instruction won't have an order.
5653 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5654 return B->getInstruction()->comesBefore(A->getInstruction());
5655 };
5656 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5657
5658 // If there are two assignments to the same variable attached to the same
5659 // instruction, the ordering between the two assignments is important. Scan
5660 // for this (rare) case and establish which is the last assignment.
5661 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
 5662 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5663 if (DbgVariableRecordsToSink.size() > 1) {
 5664 SmallDenseMap<InstVarPair, unsigned> CountMap;
5665 // Count how many assignments to each variable there is per instruction.
5666 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5667 DebugVariable DbgUserVariable =
5668 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5669 DVR->getDebugLoc()->getInlinedAt());
5670 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5671 }
5672
5673 // If there are any instructions with two assignments, add them to the
5674 // FilterOutMap to record that they need extra filtering.
 5675 SmallPtrSet<const Instruction *, 4> DupSet;
5676 for (auto It : CountMap) {
5677 if (It.second > 1) {
5678 FilterOutMap[It.first] = nullptr;
5679 DupSet.insert(It.first.first);
5680 }
5681 }
5682
5683 // For all instruction/variable pairs needing extra filtering, find the
5684 // latest assignment.
5685 for (const Instruction *Inst : DupSet) {
5686 for (DbgVariableRecord &DVR :
5687 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5688 DebugVariable DbgUserVariable =
5689 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5690 DVR.getDebugLoc()->getInlinedAt());
5691 auto FilterIt =
5692 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5693 if (FilterIt == FilterOutMap.end())
5694 continue;
5695 if (FilterIt->second != nullptr)
5696 continue;
5697 FilterIt->second = &DVR;
5698 }
5699 }
5700 }
5701
5702 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5703 // out any duplicate assignments identified above.
 5704 SmallVector<DbgVariableRecord *, 2> DVRClones;
5705 SmallSet<DebugVariable, 4> SunkVariables;
5706 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
 5707 if (DVR->isDbgDeclare())
5708 continue;
5709
5710 DebugVariable DbgUserVariable =
5711 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5712 DVR->getDebugLoc()->getInlinedAt());
5713
5714 // For any variable where there were multiple assignments in the same place,
5715 // ignore all but the last assignment.
5716 if (!FilterOutMap.empty()) {
5717 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5718 auto It = FilterOutMap.find(IVP);
5719
5720 // Filter out.
5721 if (It != FilterOutMap.end() && It->second != DVR)
5722 continue;
5723 }
5724
5725 if (!SunkVariables.insert(DbgUserVariable).second)
5726 continue;
5727
5728 if (DVR->isDbgAssign())
5729 continue;
5730
5731 DVRClones.emplace_back(DVR->clone());
5732 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5733 }
5734
5735 // Perform salvaging without the clones, then sink the clones.
5736 if (DVRClones.empty())
5737 return;
5738
5739 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5740
5741 // The clones are in reverse order of original appearance. Assert that the
5742 // head bit is set on the iterator as we _should_ have received it via
5743 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5744 // we'll repeatedly insert at the head, such as:
5745 // DVR-3 (third insertion goes here)
5746 // DVR-2 (second insertion goes here)
5747 // DVR-1 (first insertion goes here)
5748 // Any-Prior-DVRs
5749 // InsertPtInst
5750 assert(InsertPos.getHeadBit());
5751 for (DbgVariableRecord *DVRClone : DVRClones) {
5752 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5753 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5754 }
5755}
5756
 5757 bool InstCombinerImpl::run() {
5758 while (!Worklist.isEmpty()) {
5759 // Walk deferred instructions in reverse order, and push them to the
5760 // worklist, which means they'll end up popped from the worklist in-order.
5761 while (Instruction *I = Worklist.popDeferred()) {
5762 // Check to see if we can DCE the instruction. We do this already here to
5763 // reduce the number of uses and thus allow other folds to trigger.
5764 // Note that eraseInstFromFunction() may push additional instructions on
5765 // the deferred worklist, so this will DCE whole instruction chains.
 5766 if (isInstructionTriviallyDead(I, &TLI)) {
 5767 eraseInstFromFunction(*I);
5768 ++NumDeadInst;
5769 continue;
5770 }
5771
5772 Worklist.push(I);
5773 }
5774
5775 Instruction *I = Worklist.removeOne();
5776 if (I == nullptr) continue; // skip null values.
5777
5778 // Check to see if we can DCE the instruction.
 5779 if (isInstructionTriviallyDead(I, &TLI)) {
 5780 eraseInstFromFunction(*I);
5781 ++NumDeadInst;
5782 continue;
5783 }
5784
5785 if (!DebugCounter::shouldExecute(VisitCounter))
5786 continue;
5787
5788 // See if we can trivially sink this instruction to its user if we can
5789 // prove that the successor is not executed more frequently than our block.
5790 // Return the UserBlock if successful.
5791 auto getOptionalSinkBlockForInst =
5792 [this](Instruction *I) -> std::optional<BasicBlock *> {
5793 if (!EnableCodeSinking)
5794 return std::nullopt;
5795
5796 BasicBlock *BB = I->getParent();
5797 BasicBlock *UserParent = nullptr;
5798 unsigned NumUsers = 0;
5799
5800 for (Use &U : I->uses()) {
5801 User *User = U.getUser();
5802 if (User->isDroppable()) {
5803 // Do not sink if there are dereferenceable assumes that would be
5804 // removed.
 5805 if (auto *II = dyn_cast<IntrinsicInst>(User))
5806 if (II->getIntrinsicID() != Intrinsic::assume ||
5807 !II->getOperandBundle("dereferenceable"))
5808 continue;
5809 }
5810
5811 if (NumUsers > MaxSinkNumUsers)
5812 return std::nullopt;
5813
5814 Instruction *UserInst = cast<Instruction>(User);
5815 // Special handling for Phi nodes - get the block the use occurs in.
5816 BasicBlock *UserBB = UserInst->getParent();
5817 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5818 UserBB = PN->getIncomingBlock(U);
5819 // Bail out if we have uses in different blocks. We don't do any
 5820 // sophisticated analysis (i.e. finding the NearestCommonDominator of these
5821 // use blocks).
5822 if (UserParent && UserParent != UserBB)
5823 return std::nullopt;
5824 UserParent = UserBB;
5825
 5826 // Make sure these checks are done only once; naturally we do them the
 5827 // first time we get the userparent, which saves compile time.
5828 if (NumUsers == 0) {
5829 // Try sinking to another block. If that block is unreachable, then do
5830 // not bother. SimplifyCFG should handle it.
5831 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5832 return std::nullopt;
5833
5834 auto *Term = UserParent->getTerminator();
5835 // See if the user is one of our successors that has only one
5836 // predecessor, so that we don't have to split the critical edge.
5837 // Another option where we can sink is a block that ends with a
5838 // terminator that does not pass control to other block (such as
5839 // return or unreachable or resume). In this case:
5840 // - I dominates the User (by SSA form);
5841 // - the User will be executed at most once.
5842 // So sinking I down to User is always profitable or neutral.
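 // E.g. an instruction whose single user lives in the lone successor of a
 // conditional branch can be sunk there, so the work is done only on the
 // paths that actually need the value.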
5843 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5844 return std::nullopt;
5845
5846 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5847 }
5848
5849 NumUsers++;
5850 }
5851
 5852 // No users, or only droppable users.
5853 if (!UserParent)
5854 return std::nullopt;
5855
5856 return UserParent;
5857 };
5858
5859 auto OptBB = getOptionalSinkBlockForInst(I);
5860 if (OptBB) {
5861 auto *UserParent = *OptBB;
5862 // Okay, the CFG is simple enough, try to sink this instruction.
5863 if (tryToSinkInstruction(I, UserParent)) {
5864 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5865 MadeIRChange = true;
5866 // We'll add uses of the sunk instruction below, but since
 5867 // sinking can expose opportunities for its *operands*, add
5868 // them to the worklist
5869 for (Use &U : I->operands())
5870 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5871 Worklist.push(OpI);
5872 }
5873 }
5874
5875 // Now that we have an instruction, try combining it to simplify it.
5876 Builder.SetInsertPoint(I);
5877 Builder.CollectMetadataToCopy(
5878 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5879
5880#ifndef NDEBUG
5881 std::string OrigI;
5882#endif
5883 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5884 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5885
5886 if (Instruction *Result = visit(*I)) {
5887 ++NumCombined;
5888 // Should we replace the old instruction with a new one?
5889 if (Result != I) {
5890 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5891 << " New = " << *Result << '\n');
5892
5893 // We copy the old instruction's DebugLoc to the new instruction, unless
5894 // InstCombine already assigned a DebugLoc to it, in which case we
5895 // should trust the more specifically selected DebugLoc.
5896 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5897 // We also copy annotation metadata to the new instruction.
5898 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5899 // Everything uses the new instruction now.
5900 I->replaceAllUsesWith(Result);
5901
5902 // Move the name to the new instruction first.
5903 Result->takeName(I);
5904
5905 // Insert the new instruction into the basic block...
5906 BasicBlock *InstParent = I->getParent();
5907 BasicBlock::iterator InsertPos = I->getIterator();
5908
 5909 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5910 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5911 // We need to fix up the insertion point.
5912 if (isa<PHINode>(I)) // PHI -> Non-PHI
5913 InsertPos = InstParent->getFirstInsertionPt();
5914 else // Non-PHI -> PHI
5915 InsertPos = InstParent->getFirstNonPHIIt();
5916 }
5917
5918 Result->insertInto(InstParent, InsertPos);
5919
5920 // Push the new instruction and any users onto the worklist.
5921 Worklist.pushUsersToWorkList(*Result);
5922 Worklist.push(Result);
5923
 5924 eraseInstFromFunction(*I);
5925 } else {
5926 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5927 << " New = " << *I << '\n');
5928
5929 // If the instruction was modified, it's possible that it is now dead.
 5930 // If so, remove it.
 5931 if (isInstructionTriviallyDead(I, &TLI)) {
 5932 eraseInstFromFunction(*I);
5933 } else {
5934 Worklist.pushUsersToWorkList(*I);
5935 Worklist.push(I);
5936 }
5937 }
5938 MadeIRChange = true;
5939 }
5940 }
5941
5942 Worklist.zap();
5943 return MadeIRChange;
5944}
5945
5946// Track the scopes used by !alias.scope and !noalias. In a function, a
5947// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5948// by both sets. If not, the declaration of the scope can be safely omitted.
5949// The MDNode of the scope can be omitted as well for the instructions that are
5950// part of this function. We do not do that at this point, as this might become
5951// too time consuming to do.
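// E.g. a "llvm.experimental.noalias.scope.decl" declaring scope !2 is dead
// if no instruction in the function names !2 in its !alias.scope list, or
// none names it in !noalias: the scope only affects aliasing when it appears
// on both sides.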
 5952 class AliasScopeTracker {
 5953 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5954 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5955
5956public:
 5957 void analyse(Instruction *I) {
5958 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5959 if (!I->hasMetadataOtherThanDebugLoc())
5960 return;
5961
5962 auto Track = [](Metadata *ScopeList, auto &Container) {
5963 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5964 if (!MDScopeList || !Container.insert(MDScopeList).second)
5965 return;
5966 for (const auto &MDOperand : MDScopeList->operands())
5967 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5968 Container.insert(MDScope);
5969 };
5970
5971 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5972 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5973 }
5974
 5975 bool isNoAliasScopeDeclDead(Instruction *Inst) {
 5976 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5977 if (!Decl)
5978 return false;
5979
5980 assert(Decl->use_empty() &&
5981 "llvm.experimental.noalias.scope.decl in use ?");
5982 const MDNode *MDSL = Decl->getScopeList();
5983 assert(MDSL->getNumOperands() == 1 &&
5984 "llvm.experimental.noalias.scope should refer to a single scope");
5985 auto &MDOperand = MDSL->getOperand(0);
5986 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5987 return !UsedAliasScopesAndLists.contains(MD) ||
5988 !UsedNoAliasScopesAndLists.contains(MD);
5989
 5990 // Not an MDNode? Throw it away.
5991 return true;
5992 }
5993};
5994
5995/// Populate the IC worklist from a function, by walking it in reverse
5996/// post-order and adding all reachable code to the worklist.
5997///
5998/// This has a couple of tricks to make the code faster and more powerful. In
5999/// particular, we constant fold and DCE instructions as we go, to avoid adding
6000/// them to the worklist (this significantly speeds up instcombine on code where
6001/// many instructions are dead or constant). Additionally, if we find a branch
6002/// whose condition is a known constant, we only visit the reachable successors.
 6003 bool InstCombinerImpl::prepareWorklist(Function &F) {
6004 bool MadeIRChange = false;
 6005 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
6006 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
6007 DenseMap<Constant *, Constant *> FoldedConstants;
6008 AliasScopeTracker SeenAliasScopes;
6009
6010 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
6011 for (BasicBlock *Succ : successors(BB))
6012 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
6013 for (PHINode &PN : Succ->phis())
6014 for (Use &U : PN.incoming_values())
6015 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
6016 U.set(PoisonValue::get(PN.getType()));
6017 MadeIRChange = true;
6018 }
6019 };
6020
6021 for (BasicBlock *BB : RPOT) {
6022 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
6023 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
6024 })) {
6025 HandleOnlyLiveSuccessor(BB, nullptr);
6026 continue;
6027 }
6028 LiveBlocks.insert(BB);
6029
6030 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
6031 // ConstantProp instruction if trivially constant.
6032 if (!Inst.use_empty() &&
6033 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
6034 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
6035 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
6036 << '\n');
6037 Inst.replaceAllUsesWith(C);
6038 ++NumConstProp;
6039 if (isInstructionTriviallyDead(&Inst, &TLI))
6040 Inst.eraseFromParent();
6041 MadeIRChange = true;
6042 continue;
6043 }
6044
6045 // See if we can constant fold its operands.
6046 for (Use &U : Inst.operands()) {
 6047 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
6048 continue;
6049
6050 auto *C = cast<Constant>(U);
6051 Constant *&FoldRes = FoldedConstants[C];
6052 if (!FoldRes)
6053 FoldRes = ConstantFoldConstant(C, DL, &TLI);
6054
6055 if (FoldRes != C) {
6056 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
6057 << "\n Old = " << *C
6058 << "\n New = " << *FoldRes << '\n');
6059 U = FoldRes;
6060 MadeIRChange = true;
6061 }
6062 }
6063
6064 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
6065 // these call instructions consumes non-trivial amount of time and
6066 // provides no value for the optimization.
6067 if (!Inst.isDebugOrPseudoInst()) {
6068 InstrsForInstructionWorklist.push_back(&Inst);
6069 SeenAliasScopes.analyse(&Inst);
6070 }
6071 }
6072
6073 // If this is a branch or switch on a constant, mark only the single
6074 // live successor. Otherwise assume all successors are live.
6075 Instruction *TI = BB->getTerminator();
6076 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
6077 if (isa<UndefValue>(BI->getCondition())) {
6078 // Branch on undef is UB.
6079 HandleOnlyLiveSuccessor(BB, nullptr);
6080 continue;
6081 }
6082 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
6083 bool CondVal = Cond->getZExtValue();
6084 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6085 continue;
6086 }
6087 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
6088 if (isa<UndefValue>(SI->getCondition())) {
6089 // Switch on undef is UB.
6090 HandleOnlyLiveSuccessor(BB, nullptr);
6091 continue;
6092 }
6093 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
6094 HandleOnlyLiveSuccessor(BB,
6095 SI->findCaseValue(Cond)->getCaseSuccessor());
6096 continue;
6097 }
6098 }
6099 }
6100
6101 // Remove instructions inside unreachable blocks. This prevents the
6102 // instcombine code from having to deal with some bad special cases, and
6103 // reduces use counts of instructions.
6104 for (BasicBlock &BB : F) {
6105 if (LiveBlocks.count(&BB))
6106 continue;
6107
6108 unsigned NumDeadInstInBB;
6109 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
6110
6111 MadeIRChange |= NumDeadInstInBB != 0;
6112 NumDeadInst += NumDeadInstInBB;
6113 }
6114
6115 // Once we've found all of the instructions to add to instcombine's worklist,
6116 // add them in reverse order. This way instcombine will visit from the top
6117 // of the function down. This jives well with the way that it adds all uses
6118 // of instructions to the worklist after doing a transformation, thus avoiding
6119 // some N^2 behavior in pathological cases.
6120 Worklist.reserve(InstrsForInstructionWorklist.size());
6121 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
6122 // DCE instruction if trivially dead. As we iterate in reverse program
6123 // order here, we will clean up whole chains of dead instructions.
6124 if (isInstructionTriviallyDead(Inst, &TLI) ||
6125 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
6126 ++NumDeadInst;
6127 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
6128 salvageDebugInfo(*Inst);
6129 Inst->eraseFromParent();
6130 MadeIRChange = true;
6131 continue;
6132 }
6133
6134 Worklist.push(Inst);
6135 }
6136
6137 return MadeIRChange;
6138}
6139
 6140 void InstCombiner::computeBackEdges() {
6141 // Collect backedges.
 6142 SmallPtrSet<BasicBlock *, 16> Visited;
6143 for (BasicBlock *BB : RPOT) {
6144 Visited.insert(BB);
6145 for (BasicBlock *Succ : successors(BB))
6146 if (Visited.contains(Succ))
6147 BackEdges.insert({BB, Succ});
6148 }
6149 ComputedBackEdges = true;
6150}
6151
 6152 static bool combineInstructionsOverFunction(
 6153 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
 6154 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
 6155 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
 6156 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
6157 const InstCombineOptions &Opts) {
6158 auto &DL = F.getDataLayout();
6159 bool VerifyFixpoint = Opts.VerifyFixpoint &&
6160 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
6161
6162 /// Builder - This is an IRBuilder that automatically inserts new
6163 /// instructions into the worklist when they are created.
 6164 InstCombiner::BuilderTy Builder(
6165 F.getContext(), TargetFolder(DL),
6166 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
6167 Worklist.add(I);
6168 if (auto *Assume = dyn_cast<AssumeInst>(I))
6169 AC.registerAssumption(Assume);
6170 }));
6171
 6172 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
6173
6174 // Lower dbg.declare intrinsics otherwise their value may be clobbered
6175 // by instcombiner.
6176 bool MadeIRChange = false;
6178 MadeIRChange = LowerDbgDeclare(F);
6179
6180 // Iterate while there is work to do.
6181 unsigned Iteration = 0;
6182 while (true) {
6183 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
6184 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
6185 << " on " << F.getName()
6186 << " reached; stopping without verifying fixpoint\n");
6187 break;
6188 }
6189
6190 ++Iteration;
6191 ++NumWorklistIterations;
6192 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6193 << F.getName() << "\n");
6194
6195 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
6196 BPI, PSI, DL, RPOT);
 6197 IC.MaxArraySizeForCombine = MaxArraySize;
6198 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6199 MadeChangeInThisIteration |= IC.run();
6200 if (!MadeChangeInThisIteration)
6201 break;
6202
6203 MadeIRChange = true;
6204 if (Iteration > Opts.MaxIterations) {
 6205 report_fatal_error(
6206 "Instruction Combining on " + Twine(F.getName()) +
6207 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6208 " iterations. " +
6209 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6210 "'instcombine-no-verify-fixpoint' to suppress this error.");
6211 }
6212 }
6213
6214 if (Iteration == 1)
6215 ++NumOneIteration;
6216 else if (Iteration == 2)
6217 ++NumTwoIterations;
6218 else if (Iteration == 3)
6219 ++NumThreeIterations;
6220 else
6221 ++NumFourOrMoreIterations;
6222
6223 return MadeIRChange;
6224}
6225
 6226 InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
6227
 6228 void InstCombinePass::printPipeline(
6229 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6230 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6231 OS, MapClassName2PassName);
6232 OS << '<';
6233 OS << "max-iterations=" << Options.MaxIterations << ";";
6234 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6235 OS << '>';
6236}
6237
6238char InstCombinePass::ID = 0;
6239
 6240 PreservedAnalyses InstCombinePass::run(Function &F,
 6241 FunctionAnalysisManager &AM) {
6242 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6243 // No changes since last InstCombine pass, exit early.
6244 if (LRT.shouldSkip(&ID))
6245 return PreservedAnalyses::all();
6246
6247 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6248 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6249 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
 6250 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6251 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6252
6253 auto *AA = &AM.getResult<AAManager>(F);
6254 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6255 ProfileSummaryInfo *PSI =
6256 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6257 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6258 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
 6259 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
6260
6261 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6262 BFI, BPI, PSI, Options)) {
6263 // No changes, all analyses are preserved.
6264 LRT.update(&ID, /*Changed=*/false);
6265 return PreservedAnalyses::all();
6266 }
6267
6268 // Mark all the analyses that instcombine updates as preserved.
 6269 PreservedAnalyses PA;
6270 LRT.update(&ID, /*Changed=*/true);
 6271 PA.preserveSet<CFGAnalyses>();
 6272 PA.preserve<LastRunTrackingAnalysis>();
6273 return PA;
6274}
6275
6291
 6292 bool InstructionCombiningPass::runOnFunction(Function &F) {
6293 if (skipFunction(F))
6294 return false;
6295
6296 // Required analyses.
6297 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6298 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6299 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
 6300 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6301 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
 6302 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6303
6304 // Optional analyses.
6305 ProfileSummaryInfo *PSI =
 6306 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6307 BlockFrequencyInfo *BFI =
6308 (PSI && PSI->hasProfileSummary()) ?
 6309 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6310 nullptr;
6311 BranchProbabilityInfo *BPI = nullptr;
6312 if (auto *WrapperPass =
 6313 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
6314 BPI = &WrapperPass->getBPI();
6315
6316 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6317 BFI, BPI, PSI, InstCombineOptions());
6318}
6319
 6320 char InstructionCombiningPass::ID = 0;
6321
6323
 6324 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
6325 "Combine redundant instructions", false, false)
 6335 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
6336 "Combine redundant instructions", false, false)
6337
6338// Initialization Routines.
 6339 void llvm::initializeInstCombine(PassRegistry &Registry) {
 6340 initializeInstructionCombiningPassPass(Registry);
 6341 }
6342
 6343 FunctionPass *llvm::createInstructionCombiningPass() {
 6344 return new InstructionCombiningPass();
 6345 }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "(X ROp Y) LOp Z" is always equal to "(X LOp Z) ROp (Y LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static Constant * constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector, Constant *Splat, bool SplatLHS, const DataLayout &DL)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static Instruction * foldSpliceBinOp(BinaryOperator &Inst, InstCombiner::BuilderTy &Builder)
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
This file contains the declarations for metadata subclasses.
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static unsigned getNumElements(Type *Ty)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
static unsigned getScalarSizeInBits(Type *Ty)
This pass exposes codegen information to IR-level passes.
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:213
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1769
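A minimal usage sketch (values chosen for illustration):

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using namespace llvm;

  void udivremExample() {
    APInt A(/*numBits=*/32, 7), B(32, 2), Q(32, 0), R(32, 0);
    // One call computes both results: 7 = 3 * 2 + 1.
    APInt::udivrem(A, B, Q, R);
    assert(Q == 3 && R == 1);
  }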
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1901
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1939
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1971
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
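For example (a small sketch; the width and bit count are arbitrary):

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using namespace llvm;

  void lowBitsExample() {
    // An 8-bit value with the bottom 3 bits set: 0b00000111.
    APInt Mask = APInt::getLowBitsSet(/*numBits=*/8, /*loBitsSet=*/3);
    assert(Mask.getZExtValue() == 0x7);
  }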
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1952
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
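A hedged sketch of how a new-pass-manager pass body would query these (the function name is hypothetical):

  #include "llvm/Analysis/TargetLibraryInfo.h"
  #include "llvm/IR/Dominators.h"
  #include "llvm/IR/PassManager.h"
  using namespace llvm;

  PreservedAnalyses exampleRun(Function &F, FunctionAnalysisManager &AM) {
    // getResult computes the analysis on demand; getCachedResult returns
    // null if the analysis has never been run on F.
    auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
    auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
    (void)DT; (void)TLI;
    return PreservedAnalyses::all();
  }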
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass iff it does not: add or remove basic blocks from the function, or modify terminator instructions in any way.
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:219
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:491
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
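A small sketch (operand and insertion-point names are hypothetical):

  #include "llvm/IR/InstrTypes.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  Value *emitAdd(Value *A, Value *B, Instruction *InsertPt) {
    // Builds "%sum = add i32 %a, %b" immediately before InsertPt.
    return BinaryOperator::Create(Instruction::Add, A, B, "sum", InsertPt);
  }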
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
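Concretely (a minimal sketch using the static forms of these helpers):

  #include "llvm/IR/InstrTypes.h"
  #include <cassert>
  using namespace llvm;

  void predicateExample() {
    // The inverse negates the relation; the swap mirrors operand order.
    assert(CmpInst::getInversePredicate(CmpInst::ICMP_ULT) == CmpInst::ICMP_UGE);
    assert(CmpInst::getSwappedPredicate(CmpInst::ICMP_ULT) == CmpInst::ICMP_UGT);
  }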
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:438
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:781
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
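For example (a sketch; the width and bound are arbitrary):

  #include "llvm/IR/ConstantRange.h"
  #include <cassert>
  using namespace llvm;

  void icmpRegionExample() {
    // The exact region for "icmp ult X, 10" on i8 is [0, 10).
    ConstantRange CR =
        ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(8, 10));
    assert(CR.contains(APInt(8, 9)) && !CR.contains(APInt(8, 10)));
  }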
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
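For example (a sketch on i8 with an arbitrary addend):

  #include "llvm/IR/ConstantRange.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/Operator.h"
  #include <cassert>
  using namespace llvm;

  void noWrapRegionExample() {
    // All i8 values X for which "add nsw X, 100" does not wrap: [-128, 28).
    ConstantRange NSW = ConstantRange::makeExactNoWrapRegion(
        Instruction::Add, APInt(8, 100), OverflowingBinaryOperator::NoSignedWrap);
    // 27 + 100 = 127 fits in i8; 28 + 100 would exceed 127 and wrap.
    assert(NSW.contains(APInt(8, 27)) && !NSW.contains(APInt(8, 28)));
  }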
Constant Vector Declarations.
Definition Constants.h:522
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
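A short sketch (the context and element count are hypothetical):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  Constant *makeSplatOfOne(LLVMContext &Ctx) {
    // Produces the <4 x i32> constant <1, 1, 1, 1>.
    Constant *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
    return ConstantVector::getSplat(ElementCount::getFixed(4), One);
  }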
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:809
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2025
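A minimal sketch (the builder, pointer, and names are hypothetical):

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  Value *advanceBy4(IRBuilderBase &B, Value *Ptr) {
    // Emits "getelementptr inbounds i8, ptr %p, i64 4".
    return B.CreatePtrAdd(Ptr, B.getInt64(4), "p4", GEPNoWrapFlags::inBounds());
  }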
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:537
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2; } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth=0)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC with the property: shuffle(NewC, ShMask) = C. Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
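A sketch of the usual create-then-populate pattern (all names hypothetical):

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  PHINode *mergeTwo(Type *Ty, Value *V1, BasicBlock *BB1,
                    Value *V2, BasicBlock *BB2, BasicBlock *MergeBB) {
    // Reserve two incoming edges up front, then register each one.
    PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
                                  MergeBB->begin());
    PN->addIncoming(V1, BB1);
    PN->addIncoming(V2, BB2);
    return PN;
  }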
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1493
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:53
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:254
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:761
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:440
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
iterator_range< user_iterator > users()
Definition Value.h:427
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:345
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:713
bool use_empty() const
Definition Value.h:347
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:893
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
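A small sketch of the matcher combinators above in action (the helper name is hypothetical):

  #include "llvm/IR/PatternMatch.h"
  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Returns X if V is a single-use "xor X, -1" (i.e. a bitwise 'not'),
  // otherwise nullptr.
  Value *matchNot(Value *V) {
    Value *X;
    if (match(V, m_OneUse(m_Xor(m_Value(X), m_AllOnes()))))
      return X;
    return nullptr;
  }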
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:831
void stable_sort(R &&Range)
Definition STLExtras.h:2116
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition Metadata.cpp:64
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2503
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:257
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
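A short sketch (container contents are arbitrary):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  using namespace llvm;

  void enumerateExample() {
    SmallVector<int, 4> Vals = {10, 20, 30};
    // Pairs each element with its index, without a manual counter.
    for (const auto &En : enumerate(Vals)) {
      (void)En.index(); // 0, 1, 2
      (void)En.value(); // 10, 20, 30
    }
  }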
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:634
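A sketch assuming a BasicBlock &BB: the adaptor advances past the current instruction before the loop body runs, so erasing it does not invalidate the iteration.

  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (llvm::isInstructionTriviallyDead(&I))
      I.eraseFromParent();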
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of the C standard library function frexp.
Definition APFloat.h:1618
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
Definition Local.cpp:2486
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
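For a power-of-two multiplier, countr_zero yields the shift amount used by canonicalizations like mul-to-shl (point 6 in the file header); a sketch over a plain integer:

  uint64_t C = 16;
  if (llvm::has_single_bit(C)) {
    unsigned ShAmt = llvm::countr_zero(C); // 4, since x * 16 == x << 4
    (void)ShAmt;
  }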
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start, entry], [Inc, backedge]; Inc = binop iv, Step (or Inc = binop Step, iv).
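A sketch assuming a PHINode *PN (hypothetical name); on success the out-parameters describe the backedge binop and the loop-invariant start/step values.

  llvm::BinaryOperator *BO = nullptr;
  llvm::Value *Start = nullptr, *Step = nullptr;
  if (llvm::matchSimpleRecurrence(PN, BO, Start, Step)) {
    // BO is the increment on the backedge, e.g. an add of the phi and Step.
  }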
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
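A sketch assuming Value *LHS, Value *RHS, the original add Instruction *AddI, and a SimplifyQuery Q (all hypothetical names); a non-null result folds the add without creating new instructions.

  if (llvm::Value *V =
          llvm::simplifyAddInst(LHS, RHS, /*IsNSW=*/false, /*IsNUW=*/false, Q))
    AddI->replaceAllUsesWith(V);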
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:406
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by the 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into an appropriate set of dbg.value records.
Definition Local.cpp:1813
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg.declare record.
Definition Local.cpp:1680
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users of I.
Definition Local.cpp:2055
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2432
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
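A sketch assuming a base SimplifyQuery Q, a context instruction DivI, and a divisor Value *Den (hypothetical names); getWithInstruction, listed at the end of this section, re-anchors the query at the use site.

  const llvm::SimplifyQuery QAtDiv = Q.getWithInstruction(&DivI);
  bool DivisorNonZero = llvm::isKnownNonZero(Den, QAtDiv);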
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
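ModRefInfo acts as a two-bit mask, which is what the isModSet/isRefSet helpers in this list test; a sketch assuming AAResults &AA, a CallBase *Call, and a MemoryLocation Loc (see llvm/Analysis/AliasAnalysis.h).

  llvm::ModRefInfo MRI = AA.getModRefInfo(Call, Loc);
  bool MayWrite = llvm::isModSet(MRI); // Mod or ModRef
  bool MayRead = llvm::isRefSet(MRI);  // Ref or ModRef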
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Variant of isSafeToSpeculativelyExecute that does not use information from the instruction's non-constant operands; used when those operands are going to be replaced.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors.
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined value of C.
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
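A sketch pairing this with setBranchWeights above, assuming a conditional BranchInst *BI whose condition a fold is inverting (so the weights swap too), and the Instruction overload of extractBranchWeights also provided by ProfDataUtils.h.

  llvm::SmallVector<uint32_t, 2> Weights;
  if (llvm::extractBranchWeights(*BI, Weights) && Weights.size() == 2) {
    std::swap(Weights[0], Weights[1]);
    llvm::setBranchWeights(*BI, Weights, /*IsExpected=*/false);
  }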
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
Definition STLExtras.h:2019
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2146
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value V, returning the original object being addressed.
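A sketch assuming a Value *Ptr taken from a load or store (hypothetical name):

  const llvm::Value *Obj = llvm::getUnderlyingObject(Ptr);
  bool FromAlloca = llvm::isa<llvm::AllocaInst>(Obj); // a stack object?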
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS, false if RHS is known to be implied false by LHS, and std::nullopt if no implication can be made.
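A sketch assuming two i1 values Cond1 and Cond2 plus a DataLayout DL (hypothetical names); the tri-state result is why the return type is std::optional<bool>.

  std::optional<bool> Implied = llvm::isImpliedCondition(Cond1, Cond2, DL);
  if (Implied.has_value()) {
    // *Implied == true:  Cond2 holds whenever Cond1 does.
    // *Implied == false: Cond2 is false whenever Cond1 holds.
  }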
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:267
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:264
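A sketch tying these KnownBits accessors together, assuming a Value *V of integer type, a SimplifyQuery Q, and the (V, Q) overload of computeKnownBits from llvm/Analysis/ValueTracking.h:

  llvm::KnownBits Known = llvm::computeKnownBits(V, Q);
  unsigned BW = Known.getBitWidth();          // width of V's type
  unsigned LZ = Known.countMinLeadingZeros(); // at least LZ high bits are 0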
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const