//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simpler
// instructions. This pass does not modify the CFG. This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist-driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
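//
//    For example (an illustrative sketch with assumed values, not from the
//    original header), canonicalizations 1 and 6 together rewrite:
//       %Y = mul i32 8, %X
//    into:
//       %Y = shl i32 %X, 3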
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined, "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst, "Number of dead inst eliminated");
STATISTIC(NumSunkInst, "Number of instructions sunk");
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumFactor, "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool>
EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
                  cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}

std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
                                                KnownBitsComputed);
  }
  return std::nullopt;
}

std::optional<Value *>
InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTI.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  return TTI.isValidAddrSpaceCast(FromAS, ToAS);
}

Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)
    return llvm::emitGEPOffset(&Builder, DL, GEP);

  IRBuilderBase::InsertPointGuard Guard(Builder);
  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)
    Builder.SetInsertPoint(Inst);

  Value *Offset = EmitGEPOffset(GEP);
  // If a non-trivial GEP has other uses, rewrite it to avoid duplicating
  // the offset arithmetic.
  if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
    replaceInstUsesWith(
        *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                                 Offset, "", GEP->getNoWrapFlags()));
    eraseInstFromFunction(*Inst);
  }
  return Offset;
}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
  switch (BitWidth) {
  case 8:
  case 16:
  case 32:
    return true;
  default:
    return DL.isLegalInteger(BitWidth);
  }
}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {
  bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
  bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);

  // Convert to desirable widths even if they are not legal types.
  // Only shrink types, to prevent infinite loops.
  if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
    return true;

  // If this is a legal or desirable integer from-type, and the result would be
  // an illegal type, don't do the transformation.
  if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

/// Return true if it is desirable to convert a computation from 'From' to
/// 'To'. We don't want to convert from a legal to an illegal type or from a
/// smaller to a larger illegal type. i1 is always treated as a legal type
/// because it is a fundamental type in IR, and there are many specialized
/// optimizations for i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
  // TODO: This could be extended to allow vectors. Datalayout changes might be
  // needed to properly support that.
  if (!From->isIntegerTy() || !To->isIntegerTy())
    return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  return shouldChangeType(FromWidth, ToWidth);
}

// Return true if the no-signed-wrap flag should be maintained for I.
// The flag can be kept if the operation "B (I.getOpcode) C", where both B and
// C are ConstantInts, results in a constant that does not overflow. This
// function only handles the Add and Sub opcodes. For all other opcodes, it
// conservatively returns false.
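// Illustrative example (assumed, not from the original comment): when
// reassociating (%X +nsw 5) +nsw 7, the constants fold to 12 with no signed
// overflow, so the resulting "add nsw i32 %X, 12" may keep the nsw flag.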
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap())
    return false;

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
    return false;

  const APInt *BVal, *CVal;
  if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
    return false;

  bool Overflow = false;
  if (Opcode == Instruction::Add)
    (void)BVal->sadd_ov(*CVal, Overflow);
  else
    (void)BVal->ssub_ov(*CVal, Overflow);

  return !Overflow;
}

static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoUnsignedWrap();
}

static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  return OBO && OBO->hasNoSignedWrap();
}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable because they can
/// be safely kept across these rewrites.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
  if (!FPMO) {
    I.clearSubclassOptionalData();
    return;
  }

  FastMathFlags FMF = I.getFastMathFlags();
  I.clearSubclassOptionalData();
  I.setFastMathFlags(FMF);
}

/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
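/// Illustrative example (assumed values): with a zext from i8 to i32,
///    %t = and i8 %x, 12
///    %z = zext i8 %t to i32
///    %r = and i32 %z, 7
/// becomes "%r = and i32 (zext i8 %x to i32), 4", since 7 & zext(12) == 4.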
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
  auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
  if (!Cast || !Cast->hasOneUse())
    return false;

  // TODO: Enhance logic for other casts and remove this check.
  auto CastOpcode = Cast->getOpcode();
  if (CastOpcode != Instruction::ZExt)
    return false;

  // TODO: Enhance logic for other BinOps and remove this check.
  if (!BinOp1->isBitwiseLogicOp())
    return false;

  auto AssocOpcode = BinOp1->getOpcode();
  auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
  if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
    return false;

  Constant *C1, *C2;
  if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
      !match(BinOp2->getOperand(1), m_Constant(C2)))
    return false;

  // TODO: This assumes a zext cast.
  // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
  // to the destination type might lose bits.

  // Fold the constants together in the destination type:
  // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
  const DataLayout &DL = IC.getDataLayout();
  Type *DestTy = C1->getType();
  Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
  if (!CastC2)
    return false;
  Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
  if (!FoldedC)
    return false;

  IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
  IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp1->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
  return true;
}

// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
  auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
  if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
                      DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
    auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
    Type *CastTy = IntToPtr->getDestTy();
    if (PtrToInt &&
        CastTy->getPointerAddressSpace() ==
            PtrToInt->getSrcTy()->getPointerAddressSpace() &&
        DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
            DL.getTypeSizeInBits(PtrToInt->getDestTy()))
      return PtrToInt->getOperand(0);
  }
  return nullptr;
}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
/// Commutative operators:
///
/// 1. Order operands such that they are listed from right (least complex) to
///    left (most complex). This puts constants before unary operators before
///    binary operators.
///
/// Associative operators:
///
/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
/// Associative and commutative operators:
///
/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///    if C1 and C2 are constants.
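///
/// Illustrative example of transform 2 (assumed values): for
///    %t = add i32 %a, 5
///    %r = add i32 %t, -5
/// "B op C" is "5 + (-5)", which simplifies to 0, so %r is rewritten to
/// "add i32 %a, 0" and then folds away entirely.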
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex). This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
                                 getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    if (I.isCommutative()) {
      if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
        replaceOperand(I, 0, Pair->first);
        replaceOperand(I, 1, Pair->second);
        Changed = true;
      }
    }

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "A op V".
          replaceOperand(I, 0, A);
          replaceOperand(I, 1, V);
          bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
          bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);

          // Conservatively clear all optional flags since they may not be
          // preserved by the reassociation. Reset nsw/nuw based on the above
          // analysis.
          ClearSubclassDataAfterReassociation(I);

          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          if (IsNUW)
            I.setHasNoUnsignedWrap(true);

          if (IsNSW)
            I.setHasNoSignedWrap(true);

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op C".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      if (simplifyAssocCastAssoc(&I, *this)) {
        Changed = true;
        ++NumReassoc;
        continue;
      }

      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "V op B".
          replaceOperand(I, 0, V);
          replaceOperand(I, 1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
          // It simplifies to V. Form "B op V".
          replaceOperand(I, 0, B);
          replaceOperand(I, 1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      Value *A, *B;
      Constant *C1, *C2, *CRes;
      if (Op0 && Op1 && Op0->getOpcode() == Opcode &&
          Op1->getOpcode() == Opcode &&
          match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
          match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
          (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
        bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0) &&
                     hasNoUnsignedWrap(*Op1);
        BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add)
                                    ? BinaryOperator::CreateNUW(Opcode, A, B)
                                    : BinaryOperator::Create(Opcode, A, B);

        if (isa<FPMathOperator>(NewBO)) {
          FastMathFlags Flags = I.getFastMathFlags() &
                                Op0->getFastMathFlags() &
                                Op1->getFastMathFlags();
          NewBO->setFastMathFlags(Flags);
        }
        InsertNewInstWith(NewBO, I.getIterator());
        NewBO->takeName(Op1);
        replaceOperand(I, 0, NewBO);
        replaceOperand(I, 1, CRes);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
          I.setHasNoUnsignedWrap(true);

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (true);
}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
  // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
  if (LOp == Instruction::And)
    return ROp == Instruction::Or || ROp == Instruction::Xor;

  // X | (Y & Z) <--> (X | Y) & (X | Z)
  if (LOp == Instruction::Or)
    return ROp == Instruction::And;

  // X * (Y + Z) <--> (X * Y) + (X * Z)
  // X * (Y - Z) <--> (X * Y) - (X * Z)
  if (LOp == Instruction::Mul)
    return ROp == Instruction::Add || ROp == Instruction::Sub;

  return false;
}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}

/// This function returns the identity value for the given opcode, which can be
/// used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==>
/// X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
    return nullptr;

  return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
  assert(Op && "Expected a binary operator");
  LHS = Op->getOperand(0);
  RHS = Op->getOperand(1);
  if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
    Constant *C;
    if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
      // X << C --> X * (1 << C)
      RHS = ConstantFoldBinaryInstruction(
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
      assert(RHS && "Constant folding of immediate constants failed");
      return Instruction::Mul;
    }
    // TODO: We can add other conversions e.g. shr => div etc.
  }
  if (Instruction::isBitwiseLogicOp(TopOpcode)) {
    if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
      return Instruction::AShr;
    }
  }
  return Op->getOpcode();
}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, SimplifyQuery SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
  assert(A && B && C && D && "All values must be provided");

  Value *V = nullptr;
  Value *RetVal = nullptr;
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Does "X op' Y" always equal "Y op' X"?
  bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

  // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
  if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
    // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
    // commutative case, "(A op' B) op (C op' A)"?
    if (A == C || (InnerCommutative && A == D)) {
      if (A != C)
        std::swap(C, D);
      // Consider forming "A op' (B op D)".
      // If "B op D" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));

      // If "B op D" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
    }
  }

  // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
  if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
    // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
    // commutative case, "(A op' B) op (B op' D)"?
    if (B == D || (InnerCommutative && B == C)) {
      if (B != D)
        std::swap(C, D);
      // Consider forming "(A op C) op' B".
      // If "A op C" simplifies then it can be formed with no cost.
      V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));

      // If "A op C" doesn't simplify then only go on if one of the existing
      // operations "A op' B" and "C op' D" will be zapped as no longer used.
      if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
        V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
      if (V)
        RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
    }
  }

  if (!RetVal)
    return nullptr;

  ++NumFactor;
  RetVal->takeName(&I);

  // Try to add no-overflow flags to the final value.
  if (isa<OverflowingBinaryOperator>(RetVal)) {
    bool HasNSW = false;
    bool HasNUW = false;
    if (isa<OverflowingBinaryOperator>(&I)) {
      HasNSW = I.hasNoSignedWrap();
      HasNUW = I.hasNoUnsignedWrap();
    }
    if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
      HasNSW &= LOBO->hasNoSignedWrap();
      HasNUW &= LOBO->hasNoUnsignedWrap();
    }

    if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
      HasNSW &= ROBO->hasNoSignedWrap();
      HasNUW &= ROBO->hasNoUnsignedWrap();
    }

    if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
      // We can propagate 'nsw' if we know that
      //  %Y = mul nsw i16 %X, C
      //  %Z = add nsw i16 %Y, %X
      // =>
      //  %Z = mul nsw i16 %X, C+1
      //
      // iff C+1 isn't INT_MIN
      const APInt *CInt;
      if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
        cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);

      // nuw can be propagated with any constant or nuw value.
      cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
    }
  }
  return RetVal;
}

// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
//    1) (add/sub/disjoint_or C, (ctpop (not x))
//        -> (add/sub/disjoint_or C', (ctpop x))
//    2) (cmp pred C, (ctpop (not x))
//        -> (cmp pred C', (ctpop x))
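//
// Illustrative example for case 1 (assumed values): with i8 operands,
//    %n = xor i8 %x, -1
//    %p = call i8 @llvm.ctpop.i8(i8 %n)
//    %r = add i8 %p, 10
// uses ctpop(~x) == 8 - ctpop(x), so %r becomes
// "sub i8 18, (ctpop i8 %x)".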
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
  unsigned Opc = I->getOpcode();
  unsigned ConstIdx = 1;
  switch (Opc) {
  default:
    return nullptr;
  // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
  // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
  // is constant.
  case Instruction::Sub:
    ConstIdx = 0;
    break;
  case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types;
    // as well, since (ctpop x) is known to lie in [0, BitWidth(x)], almost all
    // signed comparisons against it are simplified to unsigned ones.
    if (cast<ICmpInst>(I)->isSigned())
      return nullptr;
    break;
  case Instruction::Or:
    if (!match(I, m_DisjointOr(m_Value(), m_Value())))
      return nullptr;
    [[fallthrough]];
  case Instruction::Add:
    break;
  }

  Value *Op;
  // Find ctpop.
  if (!match(I->getOperand(1 - ConstIdx),
             m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
    return nullptr;

  Constant *C;
  // Check other operand is ImmConstant.
  if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
    return nullptr;

  Type *Ty = Op->getType();
  Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
  // Need extra check for icmp. Note if this check is true, it generally means
  // the icmp will simplify to true/false.
  if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
    Constant *Cmp =
        ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
    if (!Cmp || !Cmp->isZeroValue())
      return nullptr;
  }

  // Check we can invert `(not x)` for free.
  bool Consumes = false;
  if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
    return nullptr;
  Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
  assert(NotOp != nullptr &&
         "Desync between isFreeToInvert and getFreelyInverted");

  Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);

  Value *R = nullptr;

  // Do the transformation here to avoid potentially introducing an infinite
  // loop.
  switch (Opc) {
  case Instruction::Sub:
    R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
    break;
  case Instruction::Or:
  case Instruction::Add:
    R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
    break;
  case Instruction::ICmp:
    R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
                           CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
    break;
  default:
    llvm_unreachable("Unhandled Opcode");
  }
  assert(R != nullptr);
  return replaceInstUsesWith(*I, R);
}

// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//    IFF
//    1) the logic_shifts match
//    2) BinOp1 is `and`, or the binop/shift pair distributes completely
//       (anything but `add` + `lshr`) and either BinOp2 is `and` or
//       (logic_shift (inv_logic_shift C1, C), C) == C1
//
// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//    IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
//
// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//    IFF
//    1) Binop1 is a bitwise logical operator `and`, `or`, or `xor`
//    2) Binop2 is `not`
//
// -> (arithmetic_shift Binop1((not X), Y), Amt)
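//
// Illustrative example of the first pattern (assumed values): with i8 values,
//    %sx = lshr i8 %x, 4
//    %m  = and i8 %sx, 15
//    %sy = lshr i8 %y, 4
//    %r  = or i8 %m, %sy
// the mask survives the shift round trip ((lshr (shl 15, 4), 4) == 15), so
// the whole expression can be rewritten as
// "lshr i8 (or i8 %y, (and i8 %x, -16)), 4".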

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getDataLayout();
  auto IsValidBinOpc = [](unsigned Opc) {
    switch (Opc) {
    default:
      return false;
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Add:
      // Skip Sub as we only match constant masks which will canonicalize to
      // use add.
      return true;
    }
  };

  // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
  // constraints.
  auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
                                      unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
           ShOpc == Instruction::Shl;
  };

  auto GetInvShift = [](unsigned ShOpc) {
    assert(ShOpc != Instruction::AShr);
    return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
  };

  auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
                                 unsigned ShOpc, Constant *CMask,
                                 Constant *CShift) {
    // If the BinOp1 is `and` we don't need to check the mask.
    if (BinOpc1 == Instruction::And)
      return true;

    // For all other possible transfers we need complete distributable
    // binop/shift (anything but `add` + `lshr`).
    if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
      return false;

    // If BinOp2 is `and`, any mask works (this only really helps for non-splat
    // vecs, otherwise the mask will be simplified and the following check will
    // handle it).
    if (BinOpc2 == Instruction::And)
      return true;

    // Otherwise, need mask that meets the below requirement.
    // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
    Constant *MaskInvShift =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
           CMask;
  };

  auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
    Constant *CMask, *CShift;
    Value *X, *Y, *ShiftedX, *Mask, *Shift;
    if (!match(I.getOperand(ShOpnum),
               m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
      return nullptr;
    if (!match(I.getOperand(1 - ShOpnum),
               m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
      return nullptr;

    if (!match(ShiftedX, m_OneUse(m_Shift(m_Value(X), m_Specific(Shift)))))
      return nullptr;

    // Make sure we are matching instruction shifts and not ConstantExpr.
    auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
    auto *IX = dyn_cast<Instruction>(ShiftedX);
    if (!IY || !IX)
      return nullptr;

    // LHS and RHS need the same shift opcode.
    unsigned ShOpc = IY->getOpcode();
    if (ShOpc != IX->getOpcode())
      return nullptr;

    // Make sure the binop is a real instruction and not a ConstantExpr.
    auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
    if (!BO2)
      return nullptr;

    unsigned BinOpc = BO2->getOpcode();
    // Make sure we have valid binops.
    if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
      return nullptr;

    if (ShOpc == Instruction::AShr) {
      if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
          BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
        Value *NotX = Builder.CreateNot(X);
        Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
      }

      return nullptr;
    }

    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift irrelevant of constants.
    if (BinOpc == I.getOpcode() &&
        IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
      Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
      Value *NewBinOp1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
      return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
    }

    // Otherwise we can only distribute by constant shifting the mask, so
    // ensure we have constants.
    if (!match(Shift, m_ImmConstant(CShift)))
      return nullptr;
    if (!match(Mask, m_ImmConstant(CMask)))
      return nullptr;

    // Check if we can distribute the binops.
    if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
      return nullptr;

    Constant *NewCMask =
        ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
    Value *NewBinOp2 = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
    Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
    return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
                                  NewBinOp1, CShift);
  };

  if (Instruction *R = MatchBinOp(0))
    return R;
  return MatchBinOp(1);
}

// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension, whose value is the select condition.
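//
// Illustrative example (assumed values): for
//    %e = zext i1 %c to i32
//    %s = select i1 %c, i32 %t, i32 %f
//    %r = add i32 %e, %s
// the zext is 1 whenever %c is true and 0 otherwise, so this becomes
// "select i1 %c, i32 (add 1, %t), i32 (add 0, %f)".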
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
  // not just binops, and would possibly be handled better in FoldOpIntoSelect.
  Instruction::BinaryOps Opc = I.getOpcode();
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *A, *CondVal, *TrueVal, *FalseVal;
  Value *CastOp;

  auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
    return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
           A->getType()->getScalarSizeInBits() == 1 &&
           match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
                                    m_Value(FalseVal)));
  };

  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
  if (MatchSelectAndCast(LHS, RHS))
    CastOp = LHS;
  else if (MatchSelectAndCast(RHS, LHS))
    CastOp = RHS;
  else
    return nullptr;

  auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
    bool IsCastOpRHS = (CastOp == RHS);
    bool IsZExt = isa<ZExtInst>(CastOp);
    Constant *C;

    if (IsTrueArm) {
      C = Constant::getNullValue(V->getType());
    } else if (IsZExt) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
    } else {
      C = Constant::getAllOnesValue(V->getType());
    }

    return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
                       : Builder.CreateBinOp(Opc, C, V);
  };

  // If the value used in the zext/sext is the select condition, or the negated
  // select condition, the binop can be simplified.
  if (CondVal == A) {
    Value *NewTrueVal = NewFoldedConst(false, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(true, FalseVal));
  }

  if (match(A, m_Not(m_Specific(CondVal)))) {
    Value *NewTrueVal = NewFoldedConst(true, TrueVal);
    return SelectInst::Create(CondVal, NewTrueVal,
                              NewFoldedConst(false, FalseVal));
  }

  return nullptr;
}

Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
  Value *A, *B, *C, *D;
  Instruction::BinaryOps LHSOpcode, RHSOpcode;

  if (Op0)
    LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
  if (Op1)
    RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);

  // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
  // a common term.
  if (Op0 && Op1 && LHSOpcode == RHSOpcode)
    if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
      return V;

  // The instruction has the form "(A op' B) op (C)". Try to factorize a
  // common term.
  if (Op0)
    if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
        return V;

  // The instruction has the form "(B) op (C op' D)". Try to factorize a
  // common term.
  if (Op1)
    if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
      if (Value *V =
              tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
        return V;

  return nullptr;
}

/// This tries to simplify binary operations which some other binary operation
/// distributes over either by factorizing out common terms
/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode();

  // Factorization.
  if (Value *R = tryFactorizationFolds(I))
    return R;

  // Expansion.
  if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C". See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);

    // Do "A op C" and "B op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      C = Builder.CreateBinOp(InnerOpcode, L, R);
      C->takeName(&I);
      return C;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "B op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, B, C);
      C->takeName(&I);
      return C;
    }

    // Does "B op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      C = Builder.CreateBinOp(TopLevelOpcode, A, C);
      C->takeName(&I);
      return C;
    }
  }

  if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)". See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Disable the use of undef because it's not safe to distribute undef.
    auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
    Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
    Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);

    // Do "A op B" and "A op C" both simplify?
    if (L && R) {
      // They do! Return "L op' R".
      ++NumExpand;
      A = Builder.CreateBinOp(InnerOpcode, L, R);
      A->takeName(&I);
      return A;
    }

    // Does "A op B" simplify to the identity value for the inner opcode?
    if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
      // It does! Return "A op C".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, C);
      A->takeName(&I);
      return A;
    }

    // Does "A op C" simplify to the identity value for the inner opcode?
    if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
      // It does! Return "A op B".
      ++NumExpand;
      A = Builder.CreateBinOp(TopLevelOpcode, A, B);
      A->takeName(&I);
      return A;
    }
  }

  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}

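// Match a pair of PHI nodes over the same blocks whose incoming values are
// pairwise swapped, e.g. (phi [%a, BB0], [%b, BB1]) and
// (phi [%b, BB0], [%a, BB1]); on success, return the common value pair
// (%a, %b).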
static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
    return std::nullopt;

  if (LHS->getNumIncomingValues() < 2)
    return std::nullopt;

  if (!equal(LHS->blocks(), RHS->blocks()))
    return std::nullopt;

  Value *L0 = LHS->getIncomingValue(0);
  Value *R0 = RHS->getIncomingValue(0);

  for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
    Value *L1 = LHS->getIncomingValue(I);
    Value *R1 = RHS->getIncomingValue(I);

    if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
      continue;

    return std::nullopt;
  }

  return std::optional(std::pair(L0, R0));
}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
  Instruction *LHSInst = dyn_cast<Instruction>(LHS);
  Instruction *RHSInst = dyn_cast<Instruction>(RHS);
  if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
    return std::nullopt;
  switch (LHSInst->getOpcode()) {
  case Instruction::PHI:
    return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
  case Instruction::Select: {
    Value *Cond = LHSInst->getOperand(0);
    Value *TrueVal = LHSInst->getOperand(1);
    Value *FalseVal = LHSInst->getOperand(2);
    if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
        FalseVal == RHSInst->getOperand(1))
      return std::pair(TrueVal, FalseVal);
    return std::nullopt;
  }
  case Instruction::Call: {
    // Match min(a, b) and max(a, b).
    MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
    MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
    if (LHSMinMax && RHSMinMax &&
        LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
         (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
          LHSMinMax->getRHS() == RHSMinMax->getLHS())))
      return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
    return std::nullopt;
  }
  default:
    return std::nullopt;
  }
}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {
  Value *A, *B, *C, *D, *E, *F;
  bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
  bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
  if (!LHSIsSelect && !RHSIsSelect)
    return nullptr;

  FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

  Value *Cond, *True = nullptr, *False = nullptr;

  // Special-case for add/negate combination. Replace the zero in the negation
  // with the trailing add operand:
  // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
  // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
  auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
    // We need an 'add' and exactly 1 arm of the select to have been
    // simplified.
    if (Opcode != Instruction::Add || (!True && !False) || (True && False))
      return nullptr;

    Value *N;
    if (True && match(FVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, True, Sub, I.getName());
    }
    if (False && match(TVal, m_Neg(m_Value(N)))) {
      Value *Sub = Builder.CreateSub(Z, N);
      return Builder.CreateSelect(Cond, Sub, False, I.getName());
    }
    return nullptr;
  };

  if (LHSIsSelect && RHSIsSelect && A == D) {
    // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
    Cond = A;
    True = simplifyBinOp(Opcode, B, E, FMF, Q);
    False = simplifyBinOp(Opcode, C, F, FMF, Q);

    if (LHS->hasOneUse() && RHS->hasOneUse()) {
      if (False && !True)
        True = Builder.CreateBinOp(Opcode, B, E);
      else if (True && !False)
        False = Builder.CreateBinOp(Opcode, C, F);
    }
  } else if (LHSIsSelect && LHS->hasOneUse()) {
    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
    Cond = A;
    True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
    False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
    if (Value *NewSel = foldAddNegate(B, C, RHS))
      return NewSel;
  } else if (RHSIsSelect && RHS->hasOneUse()) {
    // X op (D ? E : F) -> D ? (X op E) : (X op F)
    Cond = D;
    True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
    False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
    if (Value *NewSel = foldAddNegate(E, F, LHS))
      return NewSel;
  }

  if (!True || !False)
    return nullptr;

  Value *SI = Builder.CreateSelect(Cond, True, False);
  SI->takeName(&I);
  return SI;
}

/// Freely adapt every user of V as if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
  for (User *U : make_early_inc_range(I->users())) {
    if (U == IgnoredUser)
      continue; // Don't consider this user.
    switch (cast<Instruction>(U)->getOpcode()) {
    case Instruction::Select: {
      auto *SI = cast<SelectInst>(U);
      SI->swapValues();
      SI->swapProfMetadata();
      break;
    }
    case Instruction::Br: {
      BranchInst *BI = cast<BranchInst>(U);
      BI->swapSuccessors(); // swaps prof metadata too
      if (BPI)
        BPI->swapSuccEdgesProbabilities(BI->getParent());
      break;
    }
    case Instruction::Xor:
      replaceInstUsesWith(cast<Instruction>(*U), I);
      // Add to worklist for DCE.
      addToWorklist(cast<Instruction>(U));
      break;
    default:
      llvm_unreachable("Got unexpected user - out of sync with "
                       "canFreelyInvertAllUsersOf() ?");
    }
  }
}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is
/// a constant zero (which is the 'negate' form).
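/// Illustrative examples (assumed values): returns %x for "sub i32 0, %x";
/// for a ConstantInt such as i32 5, returns the folded constant i32 -5;
/// integer vector splats are negated elementwise.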
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
  Value *NegV;
  if (match(V, m_Neg(m_Value(NegV))))
    return NegV;

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Elt = CV->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return nullptr;
    }
    return ConstantExpr::getNeg(CV);
  }

  // Negate integer vector splats.
  if (auto *CV = dyn_cast<Constant>(V))
    if (CV->getType()->isVectorTy() &&
        CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
      return ConstantExpr::getNeg(CV);

  return nullptr;
}

// Try to fold:
//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
//        -> ({s|u}itofp (int_binop x, y))
//    2) (fp_binop ({s|u}itofp x), FpC)
//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
//
// Assuming the sign of the cast for x/y is `OpsFromSigned`.
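//
// Illustrative example (assumed values): if the i16 add below is proven not
// to overflow, then
//    fadd double (uitofp i16 %x to double), (uitofp i16 %y to double)
// can become
//    uitofp (add nuw i16 %x, %y) to double
// because both casts and the integer add are then exact.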
Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
    BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
    Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {

  Type *FPTy = BO.getType();
  Type *IntTy = IntOps[0]->getType();

  unsigned IntSz = IntTy->getScalarSizeInBits();
  // This is the maximum number of in-use bits in the integer for which the
  // int -> fp casts are exact.
  unsigned MaxRepresentableBits =
      APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());

  // Preserve the known number of leading bits. This can allow us to trivially
  // prove nsw/nuw later on.
  unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};

  // NB: This only comes up if OpsFromSigned is true, so there is no need to
  // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
  auto IsNonZero = [&](unsigned OpNo) -> bool {
    if (OpsKnown[OpNo].hasKnownBits() &&
        OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
      return true;
    return isKnownNonZero(IntOps[OpNo], SQ);
  };

  auto IsNonNeg = [&](unsigned OpNo) -> bool {
    // NB: This matches the impl in ValueTracking, we just try to use cached
    // knownbits here. If we ever start supporting WithCache for
    // `isKnownNonNegative`, change this to an explicit call.
    return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
  };

  // Check if we know for certain that ({s|u}itofp op) is exact.
  auto IsValidPromotion = [&](unsigned OpNo) -> bool {
    // Can we treat this operand as the desired sign?
    if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
        !IsNonNeg(OpNo))
      return false;

    // If fp precision >= bitwidth(op) then it's exact.
    // NB: This is slightly conservative for `sitofp`. For signed conversion,
    // we can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will
    // be handled specially. We can't, however, increase the bound arbitrarily
    // for `sitofp` as for larger sizes, it won't sign extend.
    if (MaxRepresentableBits < IntSz) {
      // Otherwise, if it's a signed cast, check that fp precision >=
      // bitwidth(op) - numSignBits(op).
      // TODO: If we add support for `WithCache` in `ComputeNumSignBits`,
      // change `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
      if (OpsFromSigned)
        NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
      // Finally, for unsigned check that fp precision >= bitwidth(op) -
      // numLeadingZeros(op).
      else {
        NumUsedLeadingBits[OpNo] =
            IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
      }
    }
    // NB: We could also check if op is known to be a power of 2 or zero (which
    // will always be representable). It's unlikely, however, that if we are
    // unable to bound op in any way, we will be able to pass the overflow
    // checks later on.

    if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
      return false;
    // Signed + Mul also requires that op is non-zero to avoid -0 cases.
    return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
           IsNonZero(OpNo);
  };

  // If we have a constant rhs, see if we can losslessly convert it to an int.
  if (Op1FpC != nullptr) {
    // Signed + Mul requires non-zero.
    if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
        !match(Op1FpC, m_NonZeroFP()))
      return nullptr;

    Constant *Op1IntC = ConstantFoldCastOperand(
        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
        IntTy, DL);
    if (Op1IntC == nullptr)
      return nullptr;
    if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
                                              : Instruction::UIToFP,
                                Op1IntC, FPTy, DL) != Op1FpC)
      return nullptr;

    // First try to keep sign of cast the same.
    IntOps[1] = Op1IntC;
  }

  // Ensure lhs/rhs integer types match.
  if (IntTy != IntOps[1]->getType())
    return nullptr;

  if (Op1FpC == nullptr) {
    if (!IsValidPromotion(1))
      return nullptr;
  }
  if (!IsValidPromotion(0))
    return nullptr;

  // Finally, we check if the integer version of the binop will not overflow.
  Instruction::BinaryOps IntOpc;
  // Because of the precision check, we can often rule out overflows.
  bool NeedsOverflowCheck = true;
  // Try to conservatively rule out overflow based on the precision checks
  // already done.
  unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
  unsigned OverflowMaxCurBits =
      std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
  bool OutputSigned = OpsFromSigned;
  switch (BO.getOpcode()) {
  case Instruction::FAdd:
    IntOpc = Instruction::Add;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FSub:
    IntOpc = Instruction::Sub;
    OverflowMaxOutputBits += OverflowMaxCurBits;
    break;
  case Instruction::FMul:
    IntOpc = Instruction::Mul;
    OverflowMaxOutputBits += OverflowMaxCurBits * 2;
    break;
  default:
    llvm_unreachable("Unsupported binop");
  }
  // The precision check may have already ruled out overflow.
  if (OverflowMaxOutputBits < IntSz) {
    NeedsOverflowCheck = false;
    // We can bound unsigned overflow from sub to an in-range signed value
    // (this is what allows us to avoid the overflow check for sub).
    if (IntOpc == Instruction::Sub)
      OutputSigned = true;
  }

  // The precision check did not rule out overflow, so we need to check.
  // TODO: If we add support for `WithCache` in `willNotOverflow`, change
  // `IntOps[...]` arguments to `KnownOps[...]`.
  if (NeedsOverflowCheck &&
      !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
    return nullptr;

  Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
  if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
    IntBO->setHasNoSignedWrap(OutputSigned);
    IntBO->setHasNoUnsignedWrap(!OutputSigned);
  }
  if (OutputSigned)
    return new SIToFPInst(IntBinOp, FPTy);
  return new UIToFPInst(IntBinOp, FPTy);
}

// Try to fold:
//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
//        -> ({s|u}itofp (int_binop x, y))
//    2) (fp_binop ({s|u}itofp x), FpC)
//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
  std::array<Value *, 2> IntOps = {nullptr, nullptr};
  Constant *Op1FpC = nullptr;
  // Check for:
  //    1) (binop ({s|u}itofp x), ({s|u}itofp y))
  //    2) (binop ({s|u}itofp x), FpC)
  if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
      !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
    return nullptr;

  if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
      !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
      !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
    return nullptr;

  // Cache KnownBits a bit to potentially save some analysis.
  SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};

  // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
  // different constraints depending on the sign of the cast.
  // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
  if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
                                                    IntOps, Op1FpC, OpsKnown))
    return R;
  return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
                                      Op1FpC, OpsKnown);
}

/// A binop with a constant operand and a sign-extended boolean operand may be
/// converted into a select of constants by applying the binary operation to
/// the constant with the two possible values of the extended boolean (0 or -1).
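/// Illustrative example (assumed values):
///    %e = sext i1 %b to i32
///    %r = mul i32 %e, 5
/// applies the binop to both extended values (-1 and 0), yielding
///    %r = select i1 %b, i32 -5, i32 0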
1629Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1630 // TODO: Handle non-commutative binop (constant is operand 0).
1631 // TODO: Handle zext.
1632 // TODO: Peek through 'not' of cast.
1633 Value *BO0 = BO.getOperand(0);
1634 Value *BO1 = BO.getOperand(1);
1635 Value *X;
1636 Constant *C;
1637 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1638 !X->getType()->isIntOrIntVectorTy(1))
1639 return nullptr;
1640
1641 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1642 Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
1643 Constant *Zero = ConstantInt::getNullValue(BO.getType());
1644 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1645 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1646 return SelectInst::Create(X, TVal, FVal);
1647}
1648
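// Try to constant-fold I with SI replaced by the constant of the chosen arm
// (and with any operand that the select condition pins to a constant via an
// eq/ne compare substituted as well); returns null if some operand is not a
// constant.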
1649static Value *constantFoldOperationIntoSelectOperand(Instruction &I,
1650 SelectInst *SI,
1651 bool IsTrueArm) {
1652 SmallVector<Constant *> ConstOps;
1653 for (Value *Op : I.operands()) {
1654 CmpInst::Predicate Pred;
1655 Constant *C = nullptr;
1656 if (Op == SI) {
1657 C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
1658 : SI->getFalseValue());
1659 } else if (match(SI->getCondition(),
1660 m_ICmp(Pred, m_Specific(Op), m_Constant(C))) &&
1661 Pred == (IsTrueArm ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
1662 isGuaranteedNotToBeUndefOrPoison(C)) {
1663 // Pass
1664 } else {
1665 C = dyn_cast<Constant>(Op);
1666 }
1667 if (C == nullptr)
1668 return nullptr;
1669
1670 ConstOps.push_back(C);
1671 }
1672
1673 return ConstantFoldInstOperands(&I, ConstOps, I.getDataLayout());
1674}
1675
1676static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1677 Value *NewOp, InstCombiner &IC) {
1678 Instruction *Clone = I.clone();
1679 Clone->replaceUsesOfWith(SI, NewOp);
1680 Clone->dropUBImplyingAttrsAndMetadata();
1681 IC.InsertNewInstBefore(Clone, SI->getIterator());
1682 return Clone;
1683}
1684
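// Fold Op into the arms of SI: Op(select C, T, F) --> select C, Op(T), Op(F),
// provided at least one arm constant-folds.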
1685Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1686 bool FoldWithMultiUse) {
1687 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1688 if (!SI->hasOneUse() && !FoldWithMultiUse)
1689 return nullptr;
1690
1691 Value *TV = SI->getTrueValue();
1692 Value *FV = SI->getFalseValue();
1693 if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1694 return nullptr;
1695
1696 // Bool selects with constant operands are folded to logical ops elsewhere.
1697 if (SI->getType()->isIntOrIntVectorTy(1))
1698 return nullptr;
1699
1700 // Test if a FCmpInst instruction is used exclusively by a select as
1701 // part of a minimum or maximum operation. If so, refrain from doing
1702 // any other folding. This helps out other analyses which understand
1703 // non-obfuscated minimum and maximum idioms. And in this case, at
1704 // least one of the comparison operands has at least one user besides
1705 // the compare (the select), which would often largely negate the
1706 // benefit of folding anyway.
1707 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1708 if (CI->hasOneUse()) {
1709 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1710 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1711 return nullptr;
1712 }
1713 }
1714
1715 // Make sure that one of the select arms constant folds successfully.
1716 Value *NewTV = constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ true);
1717 Value *NewFV = constantFoldOperationIntoSelectOperand(Op, SI, /*IsTrueArm*/ false);
1718 if (!NewTV && !NewFV)
1719 return nullptr;
1720
1721 // Create an instruction for the arm that did not fold.
1722 if (!NewTV)
1723 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1724 if (!NewFV)
1725 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1726 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1727}
1728
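// Try to simplify a copy of I in which PN is replaced by the incoming value
// InValue, with the remaining operands phi-translated into the predecessor
// InBB.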
1729static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1730 Value *InValue, BasicBlock *InBB,
1731 const DataLayout &DL,
1732 const SimplifyQuery SQ) {
1733 // NB: It is a precondition of this transform that the operands be
1734 // phi translatable! This is usually trivially satisfied by limiting it
1735 // to constant ops, and for selects we do a more sophisticated check.
1736 SmallVector<Value *> Ops;
1737 for (Value *Op : I.operands()) {
1738 if (Op == PN)
1739 Ops.push_back(InValue);
1740 else
1741 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1742 }
1743
1744 // Don't consider the simplification successful if we get back a constant
1745 // expression. That's just an instruction in hiding.
1746 // Also reject the case where we simplify back to the phi node. We wouldn't
1747 // be able to remove it in that case.
1748 Value *NewVal = simplifyInstructionWithOperands(
1749 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1750 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1751 return NewVal;
1752
1753 // Check if incoming PHI value can be replaced with constant
1754 // based on implied condition.
1755 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1756 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1757 if (TerminatorBI && TerminatorBI->isConditional() &&
1758 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1759 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1760 std::optional<bool> ImpliedCond =
1761 isImpliedCondition(TerminatorBI->getCondition(), ICmp->getPredicate(),
1762 Ops[0], Ops[1], DL, LHSIsTrue);
1763 if (ImpliedCond)
1764 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1765 }
1766
1767 return nullptr;
1768}
1769
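// Fold I into PN by applying I to each incoming value, e.g. (illustrative):
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
// -->
//   %a = add i32 %x, 4                      ; emitted at the end of %bb1
//   %r = phi i32 [ 5, %bb0 ], [ %a, %bb1 ]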
1770Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
1771 unsigned NumPHIValues = PN->getNumIncomingValues();
1772 if (NumPHIValues == 0)
1773 return nullptr;
1774
1775 // We normally only transform phis with a single use. However, if a PHI has
1776 // multiple uses and they are all the same operation, we can fold *all* of the
1777 // uses into the PHI.
1778 if (!PN->hasOneUse()) {
1779 // Walk the use list for the instruction, comparing them to I.
1780 for (User *U : PN->users()) {
1781 Instruction *UI = cast<Instruction>(U);
1782 if (UI != &I && !I.isIdenticalTo(UI))
1783 return nullptr;
1784 }
1785 // Otherwise, we can replace *all* users with the new PHI we form.
1786 }
1787
1788 // Check to see whether the instruction can be folded into each phi operand.
1789 // If there is one operand that does not fold, remember the BB it is in.
1790 // If there is more than one or if *it* is a PHI, bail out.
1791 SmallVector<Value *> NewPhiValues;
1792 BasicBlock *NonSimplifiedBB = nullptr;
1793 Value *NonSimplifiedInVal = nullptr;
1794 for (unsigned i = 0; i != NumPHIValues; ++i) {
1795 Value *InVal = PN->getIncomingValue(i);
1796 BasicBlock *InBB = PN->getIncomingBlock(i);
1797
1798 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1799 NewPhiValues.push_back(NewVal);
1800 continue;
1801 }
1802
1803 if (NonSimplifiedBB) return nullptr; // More than one non-simplified value.
1804
1805 NonSimplifiedBB = InBB;
1806 NonSimplifiedInVal = InVal;
1807 NewPhiValues.push_back(nullptr);
1808
1809 // If the InVal is an invoke at the end of the pred block, then we can't
1810 // insert a computation after it without breaking the edge.
1811 if (isa<InvokeInst>(InVal))
1812 if (cast<Instruction>(InVal)->getParent() == NonSimplifiedBB)
1813 return nullptr;
1814
1815 // If the incoming non-constant value is reachable from the phis block,
1816 // we'll push the operation across a loop backedge. This could result in
1817 // an infinite combine loop, and is generally non-profitable (especially
1818 // if the operation was originally outside the loop).
1819 if (isPotentiallyReachable(PN->getParent(), NonSimplifiedBB, nullptr, &DT,
1820 LI))
1821 return nullptr;
1822 }
1823
1824 // If there is exactly one non-simplified value, we can insert a copy of the
1825 // operation in that block. However, if this is a critical edge, we would be
1826 // inserting the computation on some other paths (e.g. inside a loop). Only
1827 // do this if the pred block is unconditionally branching into the phi block.
1828 // Also, make sure that the pred block is not dead code.
1829 if (NonSimplifiedBB != nullptr) {
1830 BranchInst *BI = dyn_cast<BranchInst>(NonSimplifiedBB->getTerminator());
1831 if (!BI || !BI->isUnconditional() ||
1832 !DT.isReachableFromEntry(NonSimplifiedBB))
1833 return nullptr;
1834 }
1835
1836 // Okay, we can do the transformation: create the new PHI node.
1837 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1838 InsertNewInstBefore(NewPN, PN->getIterator());
1839 NewPN->takeName(PN);
1840 NewPN->setDebugLoc(PN->getDebugLoc());
1841
1842 // If we are going to have to insert a new computation, do so right before the
1843 // predecessor's terminator.
1844 Instruction *Clone = nullptr;
1845 if (NonSimplifiedBB) {
1846 Clone = I.clone();
1847 for (Use &U : Clone->operands()) {
1848 if (U == PN)
1849 U = NonSimplifiedInVal;
1850 else
1851 U = U->DoPHITranslation(PN->getParent(), NonSimplifiedBB);
1852 }
1853 InsertNewInstBefore(Clone, NonSimplifiedBB->getTerminator()->getIterator());
1854 }
1855
1856 for (unsigned i = 0; i != NumPHIValues; ++i) {
1857 if (NewPhiValues[i])
1858 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1859 else
1860 NewPN->addIncoming(Clone, PN->getIncomingBlock(i));
1861 }
1862
1863 for (User *U : make_early_inc_range(PN->users())) {
1864 Instruction *User = cast<Instruction>(U);
1865 if (User == &I) continue;
1866 replaceInstUsesWith(*User, NewPN);
1867 eraseInstFromFunction(*User);
1868 }
1869
1870 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1871 const_cast<PHINode &>(*NewPN),
1872 const_cast<PHINode &>(*PN), DT);
1873 return replaceInstUsesWith(I, NewPN);
1874}
1875
1876Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1877 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1878 // we are guarding against replicating the binop in >1 predecessor.
1879 // This could miss matching a phi with 2 constant incoming values.
1880 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1881 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1882 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1883 Phi0->getNumOperands() != Phi1->getNumOperands())
1884 return nullptr;
1885
1886 // TODO: Remove the restriction for binop being in the same block as the phis.
1887 if (BO.getParent() != Phi0->getParent() ||
1888 BO.getParent() != Phi1->getParent())
1889 return nullptr;
1890
1891 // Fold when, for each predecessor block, one of the two incoming values is
1892 // the binop's identity constant; in that block the result is then simply
1893 // the other incoming value.
1894 // For example:
1895 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1896 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1897 // %add = add i32 %phi0, %phi1
1898 // ==>
1899 // %add = phi i32 [%j, %bb0], [%i, %bb1]
1900 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
1901 /*AllowRHSConstant*/ false);
1902 if (C) {
1903 SmallVector<Value *, 4> NewIncomingValues;
1904 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1905 auto &Phi0Use = std::get<0>(T);
1906 auto &Phi1Use = std::get<1>(T);
1907 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1908 return false;
1909 Value *Phi0UseV = Phi0Use.get();
1910 Value *Phi1UseV = Phi1Use.get();
1911 if (Phi0UseV == C)
1912 NewIncomingValues.push_back(Phi1UseV);
1913 else if (Phi1UseV == C)
1914 NewIncomingValues.push_back(Phi0UseV);
1915 else
1916 return false;
1917 return true;
1918 };
1919
1920 if (all_of(zip(Phi0->operands(), Phi1->operands()),
1921 CanFoldIncomingValuePair)) {
1922 PHINode *NewPhi =
1923 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1924 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1925 "The number of collected incoming values should equal the number "
1926 "of the original PHINode operands!");
1927 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1928 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1929 return NewPhi;
1930 }
1931 }
1932
1933 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1934 return nullptr;
1935
1936 // Match a pair of incoming constants for one of the predecessor blocks.
1937 BasicBlock *ConstBB, *OtherBB;
1938 Constant *C0, *C1;
1939 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
1940 ConstBB = Phi0->getIncomingBlock(0);
1941 OtherBB = Phi0->getIncomingBlock(1);
1942 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
1943 ConstBB = Phi0->getIncomingBlock(1);
1944 OtherBB = Phi0->getIncomingBlock(0);
1945 } else {
1946 return nullptr;
1947 }
1948 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
1949 return nullptr;
1950
1951 // The block that we are hoisting to must reach here unconditionally.
1952 // Otherwise, we could be speculatively executing an expensive or
1953 // non-speculative op.
1954 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
1955 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
1956 !DT.isReachableFromEntry(OtherBB))
1957 return nullptr;
1958
1959 // TODO: This check could be tightened to only apply to binops (div/rem) that
1960 // are not safe to speculatively execute. But that could allow hoisting
1961 // potentially expensive instructions (fdiv for example).
1962 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
1963 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
1964 return nullptr;
1965
1966 // Fold constants for the predecessor block with constant incoming values.
1967 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
1968 if (!NewC)
1969 return nullptr;
1970
1971 // Make a new binop in the predecessor block with the non-constant incoming
1972 // values.
1973 Builder.SetInsertPoint(PredBlockBranch);
1974 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
1975 Phi0->getIncomingValueForBlock(OtherBB),
1976 Phi1->getIncomingValueForBlock(OtherBB));
1977 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
1978 NotFoldedNewBO->copyIRFlags(&BO);
1979
1980 // Replace the binop with a phi of the new values. The old phis are dead.
1981 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
1982 NewPhi->addIncoming(NewBO, OtherBB);
1983 NewPhi->addIncoming(NewC, ConstBB);
1984 return NewPhi;
1985}
1986
1987Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
1988 if (!isa<Constant>(I.getOperand(1)))
1989 return nullptr;
1990
1991 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
1992 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
1993 return NewSel;
1994 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
1995 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
1996 return NewPhi;
1997 }
1998 return nullptr;
1999}
2000
2001static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2002 // If this GEP has only 0 indices, it is the same pointer as
2003 // Src. If Src is not a trivial GEP too, don't combine
2004 // the indices.
2005 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2006 !Src.hasOneUse())
2007 return false;
2008 return true;
2009}
2010
2011Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2012 if (!isa<VectorType>(Inst.getType()))
2013 return nullptr;
2014
2015 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2016 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2017 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2018 cast<VectorType>(Inst.getType())->getElementCount());
2019 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2020 cast<VectorType>(Inst.getType())->getElementCount());
2021
2022 // If both operands of the binop are vector concatenations, then perform the
2023 // narrow binop on each pair of the source operands followed by concatenation
2024 // of the results.
2025 Value *L0, *L1, *R0, *R1;
2026 ArrayRef<int> Mask;
2027 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2028 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2029 LHS->hasOneUse() && RHS->hasOneUse() &&
2030 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2031 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2032 // This transform does not have the speculative execution constraint as
2033 // below because the shuffle is a concatenation. The new binops are
2034 // operating on exactly the same elements as the existing binop.
2035 // TODO: We could ease the mask requirement to allow different undef lanes,
2036 // but that requires an analysis of the binop-with-undef output value.
2037 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2038 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2039 BO->copyIRFlags(&Inst);
2040 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2041 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2042 BO->copyIRFlags(&Inst);
2043 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2044 }
2045
2046 auto createBinOpReverse = [&](Value *X, Value *Y) {
2047 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2048 if (auto *BO = dyn_cast<BinaryOperator>(V))
2049 BO->copyIRFlags(&Inst);
2050 Module *M = Inst.getModule();
2051 Function *F =
2052 Intrinsic::getDeclaration(M, Intrinsic::vector_reverse, V->getType());
2053 return CallInst::Create(F, V);
2054 };
2055
2056 // NOTE: Reverse shuffles don't require the speculative execution protection
2057 // below because they don't affect which lanes take part in the computation.
2058
2059 Value *V1, *V2;
2060 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2061 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2062 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2063 (LHS->hasOneUse() || RHS->hasOneUse() ||
2064 (LHS == RHS && LHS->hasNUses(2))))
2065 return createBinOpReverse(V1, V2);
2066
2067 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2068 if (LHS->hasOneUse() && isSplatValue(RHS))
2069 return createBinOpReverse(V1, RHS);
2070 }
2071 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2072 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2073 return createBinOpReverse(LHS, V2);
2074
2075 // It may not be safe to reorder shuffles and things like div, urem, etc.
2076 // because we may trap when executing those ops on unknown vector elements.
2077 // See PR20059.
2078 if (!isSafeToSpeculativelyExecute(&Inst))
2079 return nullptr;
2080
2081 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2082 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2083 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2084 BO->copyIRFlags(&Inst);
2085 return new ShuffleVectorInst(XY, M);
2086 };
2087
2088 // If both arguments of the binary operation are shuffles that use the same
2089 // mask and shuffle within a single vector, move the shuffle after the binop.
2090 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2091 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2092 V1->getType() == V2->getType() &&
2093 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2094 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2095 return createBinOpShuffle(V1, V2, Mask);
2096 }
2097
2098 // If both arguments of a commutative binop are select-shuffles that use the
2099 // same mask with commuted operands, the shuffles are unnecessary.
2100 if (Inst.isCommutative() &&
2101 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2102 match(RHS,
2103 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2104 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2105 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2106 // TODO: Allow shuffles that contain undefs in the mask?
2107 // That is legal, but it reduces undef knowledge.
2108 // TODO: Allow arbitrary shuffles by shuffling after binop?
2109 // That might be legal, but we have to deal with poison.
2110 if (LShuf->isSelect() &&
2111 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2112 RShuf->isSelect() &&
2113 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2114 // Example:
2115 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2116 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2117 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2118 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2119 NewBO->copyIRFlags(&Inst);
2120 return NewBO;
2121 }
2122 }
2123
2124 // If one argument is a shuffle within one vector and the other is a constant,
2125 // try moving the shuffle after the binary operation. This canonicalization
2126 // intends to move shuffles closer to other shuffles and binops closer to
2127 // other binops, so they can be folded. It may also enable demanded elements
2128 // transforms.
2129 Constant *C;
2130 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
2131 if (InstVTy &&
2132 match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2133 m_Mask(Mask))),
2134 m_ImmConstant(C))) &&
2135 cast<FixedVectorType>(V1->getType())->getNumElements() <=
2136 InstVTy->getNumElements()) {
2137 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
2138 "Shuffle should not change scalar type");
2139
2140 // Find constant NewC that has property:
2141 // shuffle(NewC, ShMask) = C
2142 // If such a constant does not exist (example: ShMask=<0,0> and C=<1,2>)
2143 // reorder is not possible. A 1-to-1 mapping is not required. Example:
2144 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
2145 bool ConstOp1 = isa<Constant>(RHS);
2146 ArrayRef<int> ShMask = Mask;
2147 unsigned SrcVecNumElts =
2148 cast<FixedVectorType>(V1->getType())->getNumElements();
2149 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2150 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
2151 bool MayChange = true;
2152 unsigned NumElts = InstVTy->getNumElements();
2153 for (unsigned I = 0; I < NumElts; ++I) {
2154 Constant *CElt = C->getAggregateElement(I);
2155 if (ShMask[I] >= 0) {
2156 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2157 Constant *NewCElt = NewVecC[ShMask[I]];
2158 // Bail out if:
2159 // 1. The constant vector contains a constant expression.
2160 // 2. The shuffle needs an element of the constant vector that can't
2161 // be mapped to a new constant vector.
2162 // 3. This is a widening shuffle that copies elements of V1 into the
2163 // extended elements (extending with poison is allowed).
2164 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2165 I >= SrcVecNumElts) {
2166 MayChange = false;
2167 break;
2168 }
2169 NewVecC[ShMask[I]] = CElt;
2170 }
2171 // If this is a widening shuffle, we must be able to extend with poison
2172 // elements. If the original binop does not produce a poison in the high
2173 // lanes, then this transform is not safe.
2174 // Similarly for poison lanes due to the shuffle mask, we can only
2175 // transform binops that preserve poison.
2176 // TODO: We could shuffle those non-poison constant values into the
2177 // result by using a constant vector (rather than a poison vector)
2178 // as operand 1 of the new binop, but that might be too aggressive
2179 // for target-independent shuffle creation.
2180 if (I >= SrcVecNumElts || ShMask[I] < 0) {
2181 Constant *MaybePoison =
2182 ConstOp1
2183 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
2184 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
2185 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2186 MayChange = false;
2187 break;
2188 }
2189 }
2190 }
2191 if (MayChange) {
2192 Constant *NewC = ConstantVector::get(NewVecC);
2193 // It may not be safe to execute a binop on a vector with poison elements
2194 // because the entire instruction can be folded to undef or create poison
2195 // that did not exist in the original code.
2196 // TODO: The shift case should not be necessary.
2197 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
2198 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2199
2200 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2201 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2202 Value *NewLHS = ConstOp1 ? V1 : NewC;
2203 Value *NewRHS = ConstOp1 ? NewC : V1;
2204 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2205 }
2206 }
2207
2208 // Try to reassociate to sink a splat shuffle after a binary operation.
2209 if (Inst.isAssociative() && Inst.isCommutative()) {
2210 // Canonicalize shuffle operand as LHS.
2211 if (isa<ShuffleVectorInst>(RHS))
2212 std::swap(LHS, RHS);
2213
2214 Value *X;
2215 ArrayRef<int> MaskC;
2216 int SplatIndex;
2217 Value *Y, *OtherOp;
2218 if (!match(LHS,
2219 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2220 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2221 X->getType() != Inst.getType() ||
2222 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2223 return nullptr;
2224
2225 // FIXME: This may not be safe if the analysis allows undef elements. By
2226 // moving 'Y' before the splat shuffle, we are implicitly assuming
2227 // that it is not undef/poison at the splat index.
2228 if (isSplatValue(OtherOp, SplatIndex)) {
2229 std::swap(Y, OtherOp);
2230 } else if (!isSplatValue(Y, SplatIndex)) {
2231 return nullptr;
2232 }
2233
2234 // X and Y are splatted values, so perform the binary operation on those
2235 // values followed by a splat followed by the 2nd binary operation:
2236 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2237 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2238 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2239 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2240 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2241
2242 // Intersect FMF on both new binops. Other (poison-generating) flags are
2243 // dropped to be safe.
2244 if (isa<FPMathOperator>(R)) {
2245 R->copyFastMathFlags(&Inst);
2246 R->andIRFlags(RHS);
2247 }
2248 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2249 NewInstBO->copyIRFlags(R);
2250 return R;
2251 }
2252
2253 return nullptr;
2254}
2255
2256/// Try to narrow the width of a binop if at least 1 operand is an extend of
2257/// a value. This requires a potentially expensive known bits check to make
2258/// sure the narrow op does not overflow.
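/// For example (illustrative; only done when the known-bits check proves that
/// the narrow op cannot wrap):
///   %zx = zext i8 %x to i32
///   %zy = zext i8 %y to i32
///   %r  = add i32 %zx, %zy
/// -->
///   %n = add nuw i8 %x, %y
///   %r = zext i8 %n to i32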
2259Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2260 // We need at least one extended operand.
2261 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2262
2263 // If this is a sub, we swap the operands since we always want an extension
2264 // on the RHS. The LHS can be an extension or a constant.
2265 if (BO.getOpcode() == Instruction::Sub)
2266 std::swap(Op0, Op1);
2267
2268 Value *X;
2269 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2270 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2271 return nullptr;
2272
2273 // If both operands are the same extension from the same source type and we
2274 // can eliminate at least one (hasOneUse), this might work.
2275 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2276 Value *Y;
2277 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2278 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2279 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2280 // If that did not match, see if we have a suitable constant operand.
2281 // Truncating and extending must produce the same constant.
2282 Constant *WideC;
2283 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2284 return nullptr;
2285 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2286 if (!NarrowC)
2287 return nullptr;
2288 Y = NarrowC;
2289 }
2290
2291 // Swap back now that we found our operands.
2292 if (BO.getOpcode() == Instruction::Sub)
2293 std::swap(X, Y);
2294
2295 // Both operands have narrow versions. Last step: the math must not overflow
2296 // in the narrow width.
2297 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2298 return nullptr;
2299
2300 // bo (ext X), (ext Y) --> ext (bo X, Y)
2301 // bo (ext X), C --> ext (bo X, C')
2302 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2303 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2304 if (IsSext)
2305 NewBinOp->setHasNoSignedWrap();
2306 else
2307 NewBinOp->setHasNoUnsignedWrap();
2308 }
2309 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2310}
2311
2312static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) {
2313 return GEP1.isInBounds() && GEP2.isInBounds();
2314}
2315
2316/// Thread a GEP operation with constant indices through the constant true/false
2317/// arms of a select.
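/// For example (illustrative):
///   %p = select i1 %c, ptr @a, ptr @b
///   %g = getelementptr inbounds i32, ptr %p, i64 1
/// -->
///   %g = select i1 %c, ptr getelementptr inbounds (i32, ptr @a, i64 1),
///                      ptr getelementptr inbounds (i32, ptr @b, i64 1)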
2318static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2319 InstCombiner::BuilderTy &Builder) {
2320 if (!GEP.hasAllConstantIndices())
2321 return nullptr;
2322
2323 Instruction *Sel;
2324 Value *Cond;
2325 Constant *TrueC, *FalseC;
2326 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2327 !match(Sel,
2328 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2329 return nullptr;
2330
2331 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2332 // Propagate 'inbounds' and metadata from existing instructions.
2333 // Note: using IRBuilder to create the constants for efficiency.
2334 SmallVector<Value *, 4> IndexC(GEP.indices());
2335 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2336 Type *Ty = GEP.getSourceElementType();
2337 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2338 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2339 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2340}
2341
2342// Canonicalization:
2343// gep T, (gep i8, base, C1), (Index + C2) into
2344// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
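// For example (illustrative, with T = i32 so sizeof(T) == 4):
//   %src = getelementptr i8, ptr %base, i64 8
//   %gep = getelementptr i32, ptr %src, i64 %n      ; %n = add i64 %i, 2
// -->
//   %src.c = getelementptr i8, ptr %base, i64 16    ; 8 + 2 * 4
//   %gep   = getelementptr i32, ptr %src.c, i64 %i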
2345static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2346 GEPOperator *Src,
2347 InstCombinerImpl &IC) {
2348 if (GEP.getNumIndices() != 1)
2349 return nullptr;
2350 auto &DL = IC.getDataLayout();
2351 Value *Base;
2352 const APInt *C1;
2353 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2354 return nullptr;
2355 Value *VarIndex;
2356 const APInt *C2;
2357 Type *PtrTy = Src->getType()->getScalarType();
2358 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2359 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2360 return nullptr;
2361 if (C1->getBitWidth() != IndexSizeInBits ||
2362 C2->getBitWidth() != IndexSizeInBits)
2363 return nullptr;
2364 Type *BaseType = GEP.getSourceElementType();
2365 if (isa<ScalableVectorType>(BaseType))
2366 return nullptr;
2367 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2368 APInt NewOffset = TypeSize * *C2 + *C1;
2369 if (NewOffset.isZero() ||
2370 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2371 Value *GEPConst =
2372 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
2373 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
2374 }
2375
2376 return nullptr;
2377}
2378
2379Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2380 GEPOperator *Src) {
2381 // Combine Indices - If the source pointer to this getelementptr instruction
2382 // is a getelementptr instruction with matching element type, combine the
2383 // indices of the two getelementptr instructions into a single instruction.
2384 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2385 return nullptr;
2386
2387 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2388 return I;
2389
2390 // For constant GEPs, use a more general offset-based folding approach.
2391 Type *PtrTy = Src->getType()->getScalarType();
2392 if (GEP.hasAllConstantIndices() &&
2393 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2394 // Split Src into a variable part and a constant suffix.
2395 gep_type_iterator GTI = gep_type_begin(*Src);
2396 Type *BaseType = GTI.getIndexedType();
2397 bool IsFirstType = true;
2398 unsigned NumVarIndices = 0;
2399 for (auto Pair : enumerate(Src->indices())) {
2400 if (!isa<ConstantInt>(Pair.value())) {
2401 BaseType = GTI.getIndexedType();
2402 IsFirstType = false;
2403 NumVarIndices = Pair.index() + 1;
2404 }
2405 ++GTI;
2406 }
2407
2408 // Determine the offset for the constant suffix of Src.
2409 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
2410 if (NumVarIndices != Src->getNumIndices()) {
2411 // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2412 if (BaseType->isScalableTy())
2413 return nullptr;
2414
2415 SmallVector<Value *> ConstantIndices;
2416 if (!IsFirstType)
2417 ConstantIndices.push_back(
2418 Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
2419 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2420 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2421 }
2422
2423 // Add the offset for GEP (which is fully constant).
2424 if (!GEP.accumulateConstantOffset(DL, Offset))
2425 return nullptr;
2426
2427 APInt OffsetOld = Offset;
2428 // Convert the total offset back into indices.
2429 SmallVector<APInt> ConstIndices =
2430 DL.getGEPIndicesForOffset(BaseType, Offset);
2431 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
2432 // If both GEP are constant-indexed, and cannot be merged in either way,
2433 // convert them to a GEP of i8.
2434 if (Src->hasAllConstantIndices())
2435 return replaceInstUsesWith(
2436 GEP, Builder.CreateGEP(
2437 Builder.getInt8Ty(), Src->getOperand(0),
2438 Builder.getInt(OffsetOld), "",
2439 isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));
2440 return nullptr;
2441 }
2442
2443 bool IsInBounds = isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP));
2444 SmallVector<Value *> Indices;
2445 append_range(Indices, drop_end(Src->indices(),
2446 Src->getNumIndices() - NumVarIndices));
2447 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2448 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2449 // Even if the total offset is inbounds, we may end up representing it
2450 // by first performing a larger negative offset, and then a smaller
2451 // positive one. The large negative offset might go out of bounds. Only
2452 // preserve inbounds if all signs are the same.
2453 IsInBounds &= Idx.isNonNegative() == ConstIndices[0].isNonNegative();
2454 }
2455
2456 return replaceInstUsesWith(
2457 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2458 Indices, "", IsInBounds));
2459 }
2460
2461 if (Src->getResultElementType() != GEP.getSourceElementType())
2462 return nullptr;
2463
2464 SmallVector<Value*, 8> Indices;
2465
2466 // Find out whether the last index in the source GEP is a sequential idx.
2467 bool EndsWithSequential = false;
2468 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2469 I != E; ++I)
2470 EndsWithSequential = I.isSequential();
2471
2472 // Can we combine the two pointer arithmetics offsets?
2473 if (EndsWithSequential) {
2474 // Replace: gep (gep %P, long B), long A, ...
2475 // With: T = long A+B; gep %P, T, ...
2476 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2477 Value *GO1 = GEP.getOperand(1);
2478
2479 // If they aren't the same type, then the input hasn't been processed
2480 // by the loop above yet (which canonicalizes sequential index types to
2481 // intptr_t). Just avoid transforming this until the input has been
2482 // normalized.
2483 if (SO1->getType() != GO1->getType())
2484 return nullptr;
2485
2486 Value *Sum =
2487 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2488 // Only do the combine when we are sure the cost after the
2489 // merge is never more than that before the merge.
2490 if (Sum == nullptr)
2491 return nullptr;
2492
2493 // Update the GEP in place if possible.
2494 if (Src->getNumOperands() == 2) {
2495 GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
2496 replaceOperand(GEP, 0, Src->getOperand(0));
2497 replaceOperand(GEP, 1, Sum);
2498 return &GEP;
2499 }
2500 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2501 Indices.push_back(Sum);
2502 Indices.append(GEP.op_begin()+2, GEP.op_end());
2503 } else if (isa<Constant>(*GEP.idx_begin()) &&
2504 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2505 Src->getNumOperands() != 1) {
2506 // Otherwise we can do the fold if the first index of the GEP is a zero
2507 Indices.append(Src->op_begin()+1, Src->op_end());
2508 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2509 }
2510
2511 if (!Indices.empty())
2512 return replaceInstUsesWith(
2513 GEP, Builder.CreateGEP(
2514 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2515 isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))));
2516
2517 return nullptr;
2518}
2519
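// Return a value equivalent to ~V if it can be computed "freely", i.e. without
// creating extra instructions once all uses are rewritten (a null Builder makes
// this a pure query). DoesConsume is set when the inversion consumes an
// existing `not`.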
2520Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2521 BuilderTy *Builder,
2522 bool &DoesConsume, unsigned Depth) {
2523 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2524 // ~(~(X)) -> X.
2525 Value *A, *B;
2526 if (match(V, m_Not(m_Value(A)))) {
2527 DoesConsume = true;
2528 return A;
2529 }
2530
2531 Constant *C;
2532 // Constants can be considered to be not'ed values.
2533 if (match(V, m_ImmConstant(C)))
2534 return ConstantExpr::getNot(C);
2535
2536 if (Depth++ >= MaxAnalysisRecursionDepth)
2537 return nullptr;
2538
2539 // The rest of the cases require that we invert all uses so don't bother
2540 // doing the analysis if we know we can't use the result.
2541 if (!WillInvertAllUses)
2542 return nullptr;
2543
2544 // Compares can be inverted if all of their uses are being modified to use
2545 // the ~V.
2546 if (auto *I = dyn_cast<CmpInst>(V)) {
2547 if (Builder != nullptr)
2548 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2549 I->getOperand(1));
2550 return NonNull;
2551 }
2552
2553 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2554 // `(-1 - B) - A` if we are willing to invert all of the uses.
2555 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2556 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2557 DoesConsume, Depth))
2558 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2559 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2560 DoesConsume, Depth))
2561 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2562 return nullptr;
2563 }
2564
2565 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2566 // into `A ^ B` if we are willing to invert all of the uses.
2567 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2568 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2569 DoesConsume, Depth))
2570 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2571 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2572 DoesConsume, Depth))
2573 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2574 return nullptr;
2575 }
2576
2577 // If `V` is of the form `A - B` then `-1 - V` can be folded into
2578 // `(-1 - A) + B` if we are willing to invert all of the uses.
2579 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2580 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2581 DoesConsume, Depth))
2582 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2583 return nullptr;
2584 }
2585
2586 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2587 // into `A s>> B` if we are willing to invert all of the uses.
2588 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2589 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2590 DoesConsume, Depth))
2591 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2592 return nullptr;
2593 }
2594
2595 Value *Cond;
2596 // LogicOps are special in that we canonicalize them at the cost of an
2597 // instruction.
2598 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2599 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2600 // Selects/min/max with invertible operands are freely invertible
2601 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2602 bool LocalDoesConsume = DoesConsume;
2603 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2604 LocalDoesConsume, Depth))
2605 return nullptr;
2606 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2607 LocalDoesConsume, Depth)) {
2608 DoesConsume = LocalDoesConsume;
2609 if (Builder != nullptr) {
2610 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2611 DoesConsume, Depth);
2612 assert(NotB != nullptr &&
2613 "Unable to build inverted value for known freely invertable op");
2614 if (auto *II = dyn_cast<IntrinsicInst>(V))
2615 return Builder->CreateBinaryIntrinsic(
2616 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2617 return Builder->CreateSelect(Cond, NotA, NotB);
2618 }
2619 return NonNull;
2620 }
2621 }
2622
2623 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2624 bool LocalDoesConsume = DoesConsume;
2625 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
2626 for (Use &U : PN->operands()) {
2627 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2628 Value *NewIncomingVal = getFreelyInvertedImpl(
2629 U.get(), /*WillInvertAllUses=*/false,
2630 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2631 if (NewIncomingVal == nullptr)
2632 return nullptr;
2633 // Make sure that we can safely erase the original PHI node.
2634 if (NewIncomingVal == V)
2635 return nullptr;
2636 if (Builder != nullptr)
2637 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2638 }
2639
2640 DoesConsume = LocalDoesConsume;
2641 if (Builder != nullptr) {
2642 IRBuilderBase::InsertPointGuard Guard(*Builder);
2643 Builder->SetInsertPoint(PN);
2644 PHINode *NewPN =
2645 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2646 for (auto [Val, Pred] : IncomingValues)
2647 NewPN->addIncoming(Val, Pred);
2648 return NewPN;
2649 }
2650 return NonNull;
2651 }
2652
2653 if (match(V, m_SExtLike(m_Value(A)))) {
2654 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2655 DoesConsume, Depth))
2656 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2657 return nullptr;
2658 }
2659
2660 if (match(V, m_Trunc(m_Value(A)))) {
2661 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2662 DoesConsume, Depth))
2663 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2664 return nullptr;
2665 }
2666
2667 // De Morgan's Laws:
2668 // (~(A | B)) -> (~A & ~B)
2669 // (~(A & B)) -> (~A | ~B)
2670 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2671 bool IsLogical, Value *A,
2672 Value *B) -> Value * {
2673 bool LocalDoesConsume = DoesConsume;
2674 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2675 LocalDoesConsume, Depth))
2676 return nullptr;
2677 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2678 LocalDoesConsume, Depth)) {
2679 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2680 LocalDoesConsume, Depth);
2681 DoesConsume = LocalDoesConsume;
2682 if (IsLogical)
2683 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2684 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2685 }
2686
2687 return nullptr;
2688 };
2689
2690 if (match(V, m_Or(m_Value(A), m_Value(B))))
2691 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2692 B);
2693
2694 if (match(V, m_And(m_Value(A), m_Value(B))))
2695 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2696 B);
2697
2698 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2699 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2700 B);
2701
2702 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2703 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2704 B);
2705
2706 return nullptr;
2707}
2708
2709Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2710 Value *PtrOp = GEP.getOperand(0);
2711 SmallVector<Value *, 8> Indices(GEP.indices());
2712 Type *GEPType = GEP.getType();
2713 Type *GEPEltType = GEP.getSourceElementType();
2714 if (Value *V =
2715 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
2716 SQ.getWithInstruction(&GEP)))
2717 return replaceInstUsesWith(GEP, V);
2718
2719 // For vector geps, use the generic demanded vector support.
2720 // Skip if GEP return type is scalable. The number of elements is unknown at
2721 // compile-time.
2722 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2723 auto VWidth = GEPFVTy->getNumElements();
2724 APInt PoisonElts(VWidth, 0);
2725 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2726 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2727 PoisonElts)) {
2728 if (V != &GEP)
2729 return replaceInstUsesWith(GEP, V);
2730 return &GEP;
2731 }
2732
2733 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2734 // possible (decide on canonical form for pointer broadcast), 3) exploit
2735 // undef elements to decrease demanded bits
2736 }
2737
2738 // Eliminate unneeded casts for indices, and replace indices which displace
2739 // by multiples of a zero size type with zero.
2740 bool MadeChange = false;
2741
2742 // Index width may not be the same width as pointer width.
2743 // Data layout chooses the right type based on supported integer types.
2744 Type *NewScalarIndexTy =
2745 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2746
2747 gep_type_iterator GTI = gep_type_begin(GEP);
2748 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2749 ++I, ++GTI) {
2750 // Skip indices into struct types.
2751 if (GTI.isStruct())
2752 continue;
2753
2754 Type *IndexTy = (*I)->getType();
2755 Type *NewIndexType =
2756 IndexTy->isVectorTy()
2757 ? VectorType::get(NewScalarIndexTy,
2758 cast<VectorType>(IndexTy)->getElementCount())
2759 : NewScalarIndexTy;
2760
2761 // If the element type has zero size then any index over it is equivalent
2762 // to an index of zero, so replace it with zero if it is not zero already.
2763 Type *EltTy = GTI.getIndexedType();
2764 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2765 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2766 *I = Constant::getNullValue(NewIndexType);
2767 MadeChange = true;
2768 }
2769
2770 if (IndexTy != NewIndexType) {
2771 // If we are using a wider index than needed for this platform, shrink
2772 // it to what we need. If narrower, sign-extend it to what we need.
2773 // This explicit cast can make subsequent optimizations more obvious.
2774 *I = Builder.CreateIntCast(*I, NewIndexType, true);
2775 MadeChange = true;
2776 }
2777 }
2778 if (MadeChange)
2779 return &GEP;
2780
2781 // Canonicalize constant GEPs to i8 type.
2782 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
2783 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
2784 if (GEP.accumulateConstantOffset(DL, Offset))
2785 return replaceInstUsesWith(
2786 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
2787 GEP.getNoWrapFlags()));
2788 }
2789
2790 // Canonicalize
2791 // - scalable GEPs to an explicit offset using the llvm.vscale intrinsic.
2792 // This has better support in BasicAA.
2793 // - gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two
2794 // multiplies together.
2795 if (GEPEltType->isScalableTy() ||
2796 (!GEPEltType->isIntegerTy(8) && GEP.getNumIndices() == 1 &&
2797 match(GEP.getOperand(1),
2798 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
2799 m_Shl(m_Value(), m_ConstantInt())))))) {
2800 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
2801 return replaceInstUsesWith(
2802 GEP, Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.isInBounds()));
2803 }
2804
2805 // Check to see if the inputs to the PHI node are getelementptr instructions.
2806 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2807 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2808 if (!Op1)
2809 return nullptr;
2810
2811 // Don't fold a GEP into itself through a PHI node. This can only happen
2812 // through the back-edge of a loop. Folding a GEP into itself means that
2813 // the value of the previous iteration needs to be stored in the meantime,
2814 // thus requiring an additional register variable to be live, but not
2815 // actually achieving anything (the GEP still needs to be executed once per
2816 // loop iteration).
2817 if (Op1 == &GEP)
2818 return nullptr;
2819
2820 int DI = -1;
2821
2822 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2823 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2824 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2825 Op1->getSourceElementType() != Op2->getSourceElementType())
2826 return nullptr;
2827
2828 // As for Op1 above, don't try to fold a GEP into itself.
2829 if (Op2 == &GEP)
2830 return nullptr;
2831
2832 // Keep track of the type as we walk the GEP.
2833 Type *CurTy = nullptr;
2834
2835 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2836 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2837 return nullptr;
2838
2839 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2840 if (DI == -1) {
2841 // We have not seen any differences yet in the GEPs feeding the
2842 // PHI yet, so we record this one if it is allowed to be a
2843 // variable.
2844
2845 // The first two arguments can vary for any GEP, the rest have to be
2846 // static for struct slots
2847 if (J > 1) {
2848 assert(CurTy && "No current type?");
2849 if (CurTy->isStructTy())
2850 return nullptr;
2851 }
2852
2853 DI = J;
2854 } else {
2855 // The GEP is different by more than one input. While this could be
2856 // extended to support GEPs that vary by more than one variable it
2857 // doesn't make sense since it greatly increases the complexity and
2858 // would result in an R+R+R addressing mode which no backend
2859 // directly supports and would need to be broken into several
2860 // simpler instructions anyway.
2861 return nullptr;
2862 }
2863 }
2864
2865 // Sink down a layer of the type for the next iteration.
2866 if (J > 0) {
2867 if (J == 1) {
2868 CurTy = Op1->getSourceElementType();
2869 } else {
2870 CurTy =
2871 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2872 }
2873 }
2874 }
2875 }
2876
2877 // If not all GEPs are identical we'll have to create a new PHI node.
2878 // Check that the old PHI node has only one use so that it will get
2879 // removed.
2880 if (DI != -1 && !PN->hasOneUse())
2881 return nullptr;
2882
2883 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2884 if (DI == -1) {
2885 // All the GEPs feeding the PHI are identical. Clone one down into our
2886 // BB so that it can be merged with the current GEP.
2887 } else {
2888 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2889 // into the current block so it can be merged, and create a new PHI to
2890 // set that index.
2891 PHINode *NewPN;
2892 {
2893 IRBuilderBase::InsertPointGuard Guard(Builder);
2894 Builder.SetInsertPoint(PN);
2895 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2896 PN->getNumOperands());
2897 }
2898
2899 for (auto &I : PN->operands())
2900 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2901 PN->getIncomingBlock(I));
2902
2903 NewGEP->setOperand(DI, NewPN);
2904 }
2905
2906 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
2907 return replaceOperand(GEP, 0, NewGEP);
2908 }
2909
2910 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2911 if (Instruction *I = visitGEPOfGEP(GEP, Src))
2912 return I;
2913
2914 if (GEP.getNumIndices() == 1) {
2915 unsigned AS = GEP.getPointerAddressSpace();
2916 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2917 DL.getIndexSizeInBits(AS)) {
2918 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
2919
2920 if (TyAllocSize == 1) {
2921 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
2922 // but only if the result pointer is only used as if it were an integer,
2923 // or both point to the same underlying object (otherwise provenance is
2924 // not necessarily retained).
2925 Value *X = GEP.getPointerOperand();
2926 Value *Y;
2927 if (match(GEP.getOperand(1),
2928 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
2929 GEPType == Y->getType()) {
2930 bool HasSameUnderlyingObject =
2931 getUnderlyingObject(X) == getUnderlyingObject(Y);
2932 bool Changed = false;
2933 GEP.replaceUsesWithIf(Y, [&](Use &U) {
2934 bool ShouldReplace = HasSameUnderlyingObject ||
2935 isa<ICmpInst>(U.getUser()) ||
2936 isa<PtrToIntInst>(U.getUser());
2937 Changed |= ShouldReplace;
2938 return ShouldReplace;
2939 });
2940 return Changed ? &GEP : nullptr;
2941 }
2942 } else {
2943 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
2944 Value *V;
2945 if ((has_single_bit(TyAllocSize) &&
2946 match(GEP.getOperand(1),
2947 m_Exact(m_AShr(m_Value(V),
2948 m_SpecificInt(countr_zero(TyAllocSize)))))) ||
2949 match(GEP.getOperand(1),
2950 m_Exact(m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize))))) {
2951 return GetElementPtrInst::Create(Builder.getInt8Ty(),
2952 GEP.getPointerOperand(), V,
2953 GEP.getNoWrapFlags());
2954 }
2955 }
2956 }
2957 }
2958 // We do not handle pointer-vector geps here.
2959 if (GEPType->isVectorTy())
2960 return nullptr;
2961
2962 if (GEP.getNumIndices() == 1) {
2963 // We can only preserve inbounds if the original gep is inbounds, the add
2964 // is nsw, and the add operands are non-negative.
2965 auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {
2966 const SimplifyQuery &Q = SQ.getWithInstruction(&GEP);
2967 return GEP.isInBounds() && AddIsNSW && isKnownNonNegative(Idx1, Q) &&
2968 isKnownNonNegative(Idx2, Q);
2969 };
2970
2971 // Try to replace ADD + GEP with GEP + GEP.
2972 Value *Idx1, *Idx2;
2973 if (match(GEP.getOperand(1),
2974 m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
2975 // %idx = add i64 %idx1, %idx2
2976 // %gep = getelementptr i32, ptr %ptr, i64 %idx
2977 // as:
2978 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
2979 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
2980 bool IsInBounds = CanPreserveInBounds(
2981 cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),
2982 Idx1, Idx2);
2983 auto *NewPtr =
2984 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
2985 Idx1, "", IsInBounds);
2986 return replaceInstUsesWith(
2987 GEP, Builder.CreateGEP(GEP.getSourceElementType(), NewPtr, Idx2, "",
2988 IsInBounds));
2989 }
2990 ConstantInt *C;
2991 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
2992 m_Value(Idx1), m_ConstantInt(C))))))) {
2993 // %add = add nsw i32 %idx1, idx2
2994 // %sidx = sext i32 %add to i64
2995 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
2996 // as:
2997 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
2998 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
2999 bool IsInBounds = CanPreserveInBounds(
3000 /*IsNSW=*/true, Idx1, C);
3001 auto *NewPtr = Builder.CreateGEP(
3002 GEP.getSourceElementType(), GEP.getPointerOperand(),
3003 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "",
3004 IsInBounds);
3005 return replaceInstUsesWith(
3006 GEP,
3007 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3008 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3009 "", IsInBounds));
3010 }
3011 }
3012
3013 if (!GEP.isInBounds()) {
3014 unsigned IdxWidth =
3015 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3016 APInt BasePtrOffset(IdxWidth, 0);
3017 Value *UnderlyingPtrOp =
3018 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
3019 BasePtrOffset);
3020 bool CanBeNull, CanBeFreed;
3021 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3022 DL, CanBeNull, CanBeFreed);
3023 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3024 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3025 BasePtrOffset.isNonNegative()) {
3026 APInt AllocSize(IdxWidth, DerefBytes);
3027 if (BasePtrOffset.ule(AllocSize)) {
3028 return GetElementPtrInst::CreateInBounds(
3029 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3030 }
3031 }
3032 }
3033 }
3034
3034
3035 if (Instruction *R = foldSelectGEP(GEP, Builder))
3036 return R;
3037
3038 return nullptr;
3039}
3040
3041static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3042 Instruction *AI) {
3043 if (isa<ConstantPointerNull>(V))
3044 return true;
3045 if (auto *LI = dyn_cast<LoadInst>(V))
3046 return isa<GlobalVariable>(LI->getPointerOperand());
3047 // Two distinct allocations will never be equal.
3048 return isAllocLikeFn(V, &TLI) && V != AI;
3049}
3050
3051/// Given a call CB which uses an address UsedV, return true if we can prove the
3052/// call's only possible effect is storing to UsedV.
3053static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3054 const TargetLibraryInfo &TLI) {
3055 if (!CB.use_empty())
3056 // TODO: add recursion if returned attribute is present
3057 return false;
3058
3059 if (CB.isTerminator())
3060 // TODO: remove implementation restriction
3061 return false;
3062
3063 if (!CB.willReturn() || !CB.doesNotThrow())
3064 return false;
3065
3066 // If the only possible side effect of the call is writing to the alloca,
3067 // and the result isn't used, we can safely remove any reads implied by the
3068 // call including those which might read the alloca itself.
3069 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3070 return Dest && Dest->Ptr == UsedV;
3071}
3072
3073static bool isAllocSiteRemovable(Instruction *AI,
3074 SmallVectorImpl<WeakTrackingVH> &Users,
3075 const TargetLibraryInfo &TLI) {
3076 SmallVector<Instruction*, 4> Worklist;
3077 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3078 Worklist.push_back(AI);
3079
3080 do {
3081 Instruction *PI = Worklist.pop_back_val();
3082 for (User *U : PI->users()) {
3083 Instruction *I = cast<Instruction>(U);
3084 switch (I->getOpcode()) {
3085 default:
3086 // Give up the moment we see something we can't handle.
3087 return false;
3088
3089 case Instruction::AddrSpaceCast:
3090 case Instruction::BitCast:
3091 case Instruction::GetElementPtr:
3092 Users.emplace_back(I);
3093 Worklist.push_back(I);
3094 continue;
3095
3096 case Instruction::ICmp: {
3097 ICmpInst *ICI = cast<ICmpInst>(I);
3098 // We can fold eq/ne comparisons with null to false/true, respectively.
3099 // We also fold comparisons in some conditions provided the alloc has
3100 // not escaped (see isNeverEqualToUnescapedAlloc).
3101 if (!ICI->isEquality())
3102 return false;
3103 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3104 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3105 return false;
3106
3107 // Do not fold compares to aligned_alloc calls, as they may have to
3108 // return null in case the required alignment cannot be satisfied,
3109 // unless we can prove that both alignment and size are valid.
3110 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3111 // Check if alignment and size of a call to aligned_alloc is valid,
3112 // that is alignment is a power-of-2 and the size is a multiple of the
3113 // alignment.
3114 const APInt *Alignment;
3115 const APInt *Size;
3116 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3117 match(CB->getArgOperand(1), m_APInt(Size)) &&
3118 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3119 };
3120 auto *CB = dyn_cast<CallBase>(AI);
3121 LibFunc TheLibFunc;
3122 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3123 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3124 !AlignmentAndSizeKnownValid(CB))
3125 return false;
3126 Users.emplace_back(I);
3127 continue;
3128 }
3129
3130 case Instruction::Call:
3131 // Ignore no-op and store intrinsics.
3132 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3133 switch (II->getIntrinsicID()) {
3134 default:
3135 return false;
3136
3137 case Intrinsic::memmove:
3138 case Intrinsic::memcpy:
3139 case Intrinsic::memset: {
3140 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3141 if (MI->isVolatile() || MI->getRawDest() != PI)
3142 return false;
3143 [[fallthrough]];
3144 }
3145 case Intrinsic::assume:
3146 case Intrinsic::invariant_start:
3147 case Intrinsic::invariant_end:
3148 case Intrinsic::lifetime_start:
3149 case Intrinsic::lifetime_end:
3150 case Intrinsic::objectsize:
3151 Users.emplace_back(I);
3152 continue;
3153 case Intrinsic::launder_invariant_group:
3154 case Intrinsic::strip_invariant_group:
3155 Users.emplace_back(I);
3156 Worklist.push_back(I);
3157 continue;
3158 }
3159 }
3160
3161 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3162 Users.emplace_back(I);
3163 continue;
3164 }
3165
3166 if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3167 getAllocationFamily(I, &TLI) == Family) {
3168 assert(Family);
3169 Users.emplace_back(I);
3170 continue;
3171 }
3172
3173 if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
3174 getAllocationFamily(I, &TLI) == Family) {
3175 assert(Family);
3176 Users.emplace_back(I);
3177 Worklist.push_back(I);
3178 continue;
3179 }
3180
3181 return false;
3182
3183 case Instruction::Store: {
3184 StoreInst *SI = cast<StoreInst>(I);
3185 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3186 return false;
3187 Users.emplace_back(I);
3188 continue;
3189 }
3190 }
3191 llvm_unreachable("missing a return?");
3192 }
3193 } while (!Worklist.empty());
3194 return true;
3195}
3196
3197 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3198 assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
3199
3200 // If we have a malloc call which is only used in any number of comparisons to
3201 // null and free calls, delete the calls and replace the comparisons with true
3202 // or false as appropriate.
3203
3204 // This is based on the principle that we can substitute our own allocation
3205 // function (which will never return null) rather than knowledge of the
3206 // specific function being called. In some sense this can change the permitted
3207 // outputs of a program (when we convert a malloc to an alloca, the fact that
3208 // the allocation is now on the stack is potentially visible, for example),
3209 // but we believe it does so in a permissible manner.
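// Illustrative example (not from the source): given that these are the only
// uses,
//   %p = call ptr @malloc(i64 4)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// the comparison folds to false and both calls are deleted.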
3210 SmallVector<WeakTrackingVH, 64> Users;
3211
3212 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3213 // before each store.
3214 SmallVector<DbgVariableIntrinsic *, 8> DVIs;
3215 SmallVector<DbgVariableRecord *, 8> DVRs;
3216 std::unique_ptr<DIBuilder> DIB;
3217 if (isa<AllocaInst>(MI)) {
3218 findDbgUsers(DVIs, &MI, &DVRs);
3219 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3220 }
3221
3222 if (isAllocSiteRemovable(&MI, Users, TLI)) {
3223 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3224 // Lowering all @llvm.objectsize calls first because they may
3225 // use a bitcast/GEP of the alloca we are removing.
3226 if (!Users[i])
3227 continue;
3228
3229 Instruction *I = cast<Instruction>(&*Users[i]);
3230
3231 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3232 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3233 SmallVector<Instruction *> InsertedInstructions;
3234 Value *Result = lowerObjectSizeCall(
3235 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3236 for (Instruction *Inserted : InsertedInstructions)
3237 Worklist.add(Inserted);
3238 replaceInstUsesWith(*I, Result);
3239 eraseInstFromFunction(*I);
3240 Users[i] = nullptr; // Skip examining in the next loop.
3241 }
3242 }
3243 }
3244 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3245 if (!Users[i])
3246 continue;
3247
3248 Instruction *I = cast<Instruction>(&*Users[i]);
3249
3250 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3251 replaceInstUsesWith(*C,
3252 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3253 C->isFalseWhenEqual()));
3254 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3255 for (auto *DVI : DVIs)
3256 if (DVI->isAddressOfVariable())
3257 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
3258 for (auto *DVR : DVRs)
3259 if (DVR->isAddressOfVariable())
3260 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3261 } else {
3262 // Casts, GEP, or anything else: we're about to delete this instruction,
3263 // so it can not have any valid uses.
3264 replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
3265 }
3266 eraseInstFromFunction(*I);
3267 }
3268
3269 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3270 // Replace invoke with a NOP intrinsic to maintain the original CFG
3271 Module *M = II->getModule();
3272 Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
3273 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
3274 std::nullopt, "", II->getParent());
3275 }
3276
3277 // Remove debug intrinsics which describe the value contained within the
3278 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3279 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3280 //
3281 // ```
3282 // define void @foo(i32 %0) {
3283 // %a = alloca i32 ; Deleted.
3284 // store i32 %0, i32* %a
3285 // dbg.value(i32 %0, "arg0") ; Not deleted.
3286 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3287 // call void @trivially_inlinable_no_op(i32* %a)
3288 // ret void
3289 // }
3290 // ```
3291 //
3292 // This may not be required if we stop describing the contents of allocas
3293 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3294 // the LowerDbgDeclare utility.
3295 //
3296 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3297 // "arg0" dbg.value may be stale after the call. However, failing to remove
3298 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3299 //
3300 // FIXME: the Assignment Tracking project has now likely made this
3301 // redundant (and it's sometimes harmful).
3302 for (auto *DVI : DVIs)
3303 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3304 DVI->eraseFromParent();
3305 for (auto *DVR : DVRs)
3306 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3307 DVR->eraseFromParent();
3308
3309 return eraseInstFromFunction(MI);
3310 }
3311 return nullptr;
3312}
3313
3314/// Move the call to free before a NULL test.
3315///
3316 /// Check if this free is accessed after its argument has been tested
3317 /// against NULL (property 0).
3318 /// If yes, it is legal to move this call into its predecessor block.
3319///
3320/// The move is performed only if the block containing the call to free
3321/// will be removed, i.e.:
3322/// 1. it has only one predecessor P, and P has two successors
3323/// 2. it contains the call, noops, and an unconditional branch
3324/// 3. its successor is the same as its predecessor's successor
3325///
3326 /// Profitability is not a concern here; this function should
3327/// be called only if the caller knows this transformation would be
3328/// profitable (e.g., for code size).
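/// Illustrative shape of the transform (not from the source):
///   %c = icmp eq ptr %p, null
///   br i1 %c, label %end, label %free.bb  ; free.bb: call free(%p); br %end
/// The call to free can be hoisted into the predecessor before the test,
/// after which the now-empty block can be removed, because free(null) is a
/// no-op.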
3329 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3330 const DataLayout &DL) {
3331 Value *Op = FI.getArgOperand(0);
3332 BasicBlock *FreeInstrBB = FI.getParent();
3333 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3334
3335 // Validate part of constraint #1: Only one predecessor
3336 // FIXME: We can extend the number of predecessors, but in that case, we
3337 // would duplicate the call to free in each predecessor and it may
3338 // not be profitable even for code size.
3339 if (!PredBB)
3340 return nullptr;
3341
3342 // Validate constraint #2: Does this block contain only the call to
3343 // free, noops, and an unconditional branch?
3344 BasicBlock *SuccBB;
3345 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3346 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3347 return nullptr;
3348
3349 // If there are only 2 instructions in the block, at this point,
3350 // they must be the call to free and the unconditional branch.
3351 // If there are more than 2 instructions, check that they are noops
3352 // i.e., they won't hurt the performance of the generated code.
3353 if (FreeInstrBB->size() != 2) {
3354 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3355 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3356 continue;
3357 auto *Cast = dyn_cast<CastInst>(&Inst);
3358 if (!Cast || !Cast->isNoopCast(DL))
3359 return nullptr;
3360 }
3361 }
3362 // Validate the rest of constraint #1 by matching on the pred branch.
3363 Instruction *TI = PredBB->getTerminator();
3364 BasicBlock *TrueBB, *FalseBB;
3365 ICmpInst::Predicate Pred;
3366 if (!match(TI, m_Br(m_ICmp(Pred,
3367 m_CombineOr(m_Specific(Op),
3368 m_Specific(Op->stripPointerCasts())),
3369 m_Zero()),
3370 TrueBB, FalseBB)))
3371 return nullptr;
3372 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3373 return nullptr;
3374
3375 // Validate constraint #3: Ensure the null case just falls through.
3376 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3377 return nullptr;
3378 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3379 "Broken CFG: missing edge from predecessor to successor");
3380
3381 // At this point, we know that everything in FreeInstrBB can be moved
3382 // before TI.
3383 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3384 if (&Instr == FreeInstrBBTerminator)
3385 break;
3386 Instr.moveBeforePreserving(TI);
3387 }
3388 assert(FreeInstrBB->size() == 1 &&
3389 "Only the branch instruction should remain");
3390
3391 // Now that we've moved the call to free before the NULL check, we have to
3392 // remove any attributes on its parameter that imply it's non-null, because
3393 // those attributes might have only been valid because of the NULL check, and
3394 // we can get miscompiles if we keep them. This is conservative if non-null is
3395 // also implied by something other than the NULL check, but it's guaranteed to
3396 // be correct, and the conservativeness won't matter in practice, since the
3397 // attributes are irrelevant for the call to free itself and the pointer
3398 // shouldn't be used after the call.
3399 AttributeList Attrs = FI.getAttributes();
3400 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3401 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3402 if (Dereferenceable.isValid()) {
3403 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3404 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3405 Attribute::Dereferenceable);
3406 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3407 }
3408 FI.setAttributes(Attrs);
3409
3410 return &FI;
3411}
3412
3413 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3414 // free undef -> unreachable.
3415 if (isa<UndefValue>(Op)) {
3416 // Leave a marker since we can't modify the CFG here.
3417 CreateNonTerminatorUnreachable(&FI);
3418 return eraseInstFromFunction(FI);
3419 }
3420
3421 // If we have 'free null' delete the instruction. This can happen in stl code
3422 // when lots of inlining happens.
3423 if (isa<ConstantPointerNull>(Op))
3424 return eraseInstFromFunction(FI);
3425
3426 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3427 // realloc() entirely.
3428 CallInst *CI = dyn_cast<CallInst>(Op);
3429 if (CI && CI->hasOneUse())
3430 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3431 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3432
3433 // If we optimize for code size, try to move the call to free before the null
3434 // test so that simplify cfg can remove the empty block and dead code
3435 // elimination can remove the branch. I.e., this helps to turn something like:
3436 // if (foo) free(foo);
3437 // into
3438 // free(foo);
3439 //
3440 // Note that we can only do this for 'free' and not for any flavor of
3441 // 'operator delete'; there is no 'operator delete' symbol for which we are
3442 // permitted to invent a call, even if we're passing in a null pointer.
3443 if (MinimizeSize) {
3444 LibFunc Func;
3445 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3446 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
3447 return I;
3448 }
3449
3450 return nullptr;
3451}
3452
3453 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3454 Value *RetVal = RI.getReturnValue();
3455 if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
3456 return nullptr;
3457
3458 Function *F = RI.getFunction();
3459 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3460 if (ReturnClass == fcNone)
3461 return nullptr;
3462
3463 KnownFPClass KnownClass;
3464 Value *Simplified =
3465 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
3466 if (!Simplified)
3467 return nullptr;
3468
3469 return ReturnInst::Create(RI.getContext(), Simplified);
3470}
3471
3472// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3473 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3474 // Try to remove the previous instruction if it must lead to unreachable.
3475 // This includes instructions like stores and "llvm.assume" that may not get
3476 // removed by simple dead code elimination.
3477 bool Changed = false;
3478 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3479 // While we theoretically can erase EH, that would result in a block that
3480 // used to start with an EH no longer starting with EH, which is invalid.
3481 // To make it valid, we'd need to fixup predecessors to no longer refer to
3482 // this block, but that changes CFG, which is not allowed in InstCombine.
3483 if (Prev->isEHPad())
3484 break; // Can not drop any more instructions. We're done here.
3485
3486 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
3487 break; // Can not drop any more instructions. We're done here.
3488 // Otherwise, this instruction can be freely erased,
3489 // even if it is not side-effect free.
3490
3491 // A value may still have uses before we process it here (for example, in
3492 // another unreachable block), so convert those to poison.
3493 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3494 eraseInstFromFunction(*Prev);
3495 Changed = true;
3496 }
3497 return Changed;
3498}
3499
3500 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
3501 removeInstructionsBeforeUnreachable(I);
3502 return nullptr;
3503}
3504
3505 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
3506 assert(BI.isUnconditional() && "Only for unconditional branches.");
3507
3508 // If this store is the second-to-last instruction in the basic block
3509 // (excluding debug info and bitcasts of pointers) and if the block ends with
3510 // an unconditional branch, try to move the store to the successor block.
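// For example (illustrative, not from the source): a block ending in
//   store i32 %v, ptr %p
//   br label %succ
// may have the store sunk into %succ by mergeStoreIntoSuccessor when the
// other predecessor stores to the same address, leaving a phi of the values.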
3511
3512 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3513 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3514 return BBI->isDebugOrPseudoInst() ||
3515 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3516 };
3517
3518 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3519 do {
3520 if (BBI != FirstInstr)
3521 --BBI;
3522 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3523
3524 return dyn_cast<StoreInst>(BBI);
3525 };
3526
3527 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3528 if (mergeStoreIntoSuccessor(*SI))
3529 return &BI;
3530
3531 return nullptr;
3532}
3533
3534 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
3535 SmallVectorImpl<BasicBlock *> &Worklist) {
3536 if (!DeadEdges.insert({From, To}).second)
3537 return;
3538
3539 // Replace phi node operands in successor with poison.
3540 for (PHINode &PN : To->phis())
3541 for (Use &U : PN.incoming_values())
3542 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3543 replaceUse(U, PoisonValue::get(PN.getType()));
3544 addToWorklist(&PN);
3545 MadeIRChange = true;
3546 }
3547
3548 Worklist.push_back(To);
3549}
3550
3551// Under the assumption that I is unreachable, remove it and following
3552// instructions. Changes are reported directly to MadeIRChange.
3553 void InstCombinerImpl::handleUnreachableFrom(
3554 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
3555 BasicBlock *BB = I->getParent();
3556 for (Instruction &Inst : make_early_inc_range(
3557 make_range(std::next(BB->getTerminator()->getReverseIterator()),
3558 std::next(I->getReverseIterator())))) {
3559 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3560 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3561 MadeIRChange = true;
3562 }
3563 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3564 continue;
3565 // RemoveDIs: erase debug-info on this instruction manually.
3566 Inst.dropDbgRecords();
3567 Inst.eraseFromParent();
3568 MadeIRChange = true;
3569 }
3570
3571 SmallVector<Value *> Changed;
3572 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
3573 MadeIRChange = true;
3574 for (Value *V : Changed)
3575 addToWorklist(cast<Instruction>(V));
3576 }
3577
3578 // Handle potentially dead successors.
3579 for (BasicBlock *Succ : successors(BB))
3580 addDeadEdge(BB, Succ, Worklist);
3581}
3582
3583 void InstCombinerImpl::handlePotentiallyDeadBlocks(
3584 SmallVectorImpl<BasicBlock *> &Worklist) {
3585 while (!Worklist.empty()) {
3586 BasicBlock *BB = Worklist.pop_back_val();
3587 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3588 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3589 }))
3590 continue;
3591
3592 handleUnreachableFrom(&BB->front(), Worklist);
3593 }
3594}
3595
3596 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
3597 BasicBlock *LiveSucc) {
3598 SmallVector<BasicBlock *> Worklist;
3599 for (BasicBlock *Succ : successors(BB)) {
3600 // The live successor isn't dead.
3601 if (Succ == LiveSucc)
3602 continue;
3603
3604 addDeadEdge(BB, Succ, Worklist);
3605 }
3606
3607 handlePotentiallyDeadBlocks(Worklist);
3608 }
3609
3610 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
3611 if (BI.isUnconditional())
3612 return visitUnconditionalBranchInst(BI);
3613
3613
3614 // Change br (not X), label True, label False to: br X, label False, True
3615 Value *Cond = BI.getCondition();
3616 Value *X;
3617 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3618 // Swap Destinations and condition...
3619 BI.swapSuccessors();
3620 if (BPI)
3621 BPI->swapSuccEdgesProbabilities(BI.getParent());
3622 return replaceOperand(BI, 0, X);
3623 }
3624
3625 // Canonicalize logical-and-with-invert as logical-or-with-invert.
3626 // This is done by inverting the condition and swapping successors:
3627 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3628 Value *Y;
3629 if (isa<SelectInst>(Cond) &&
3630 match(Cond,
3631 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
3632 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3633 Value *Or = Builder.CreateLogicalOr(NotX, Y);
3634 BI.swapSuccessors();
3635 if (BPI)
3636 BPI->swapSuccEdgesProbabilities(BI.getParent());
3637 return replaceOperand(BI, 0, Or);
3638 }
3639
3640 // If the condition is irrelevant, remove the use so that other
3641 // transforms on the condition become more effective.
3642 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3643 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3644
3645 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3646 CmpInst::Predicate Pred;
3647 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3648 !isCanonicalPredicate(Pred)) {
3649 // Swap destinations and condition.
3650 auto *Cmp = cast<CmpInst>(Cond);
3651 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3652 BI.swapSuccessors();
3653 if (BPI)
3654 BPI->swapSuccEdgesProbabilities(BI.getParent());
3655 Worklist.push(Cmp);
3656 return &BI;
3657 }
3658
3659 if (isa<UndefValue>(Cond)) {
3660 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3661 return nullptr;
3662 }
3663 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3664 handlePotentiallyDeadSuccessors(BI.getParent(),
3665 BI.getSuccessor(!CI->getZExtValue()));
3666 return nullptr;
3667 }
3668
3669 DC.registerBranch(&BI);
3670 return nullptr;
3671}
3672
3673// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
3674// we can prove that both (switch C) and (switch X) go to the default when cond
3675// is false/true.
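// Illustrative example (not from the source): with
//   %cmp = icmp ugt i32 %x, 3
//   %s = select i1 %cmp, i32 7, i32 %x
//   switch i32 %s, label %default [ cases 0..3 ]
// case 7 goes to %default, and whenever %s is 7 we know %x > 3, so a switch
// on %x would also reach %default; the switch can therefore use %x directly.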
3676 static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
3677 SelectInst *Select,
3678 bool IsTrueArm) {
3679 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3680 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
3681 if (!C)
3682 return nullptr;
3683
3684 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
3685 if (CstBB != SI.getDefaultDest())
3686 return nullptr;
3687 Value *X = Select->getOperand(3 - CstOpIdx);
3688 ICmpInst::Predicate Pred;
3689 const APInt *RHSC;
3690 if (!match(Select->getCondition(),
3691 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
3692 return nullptr;
3693 if (IsTrueArm)
3694 Pred = ICmpInst::getInversePredicate(Pred);
3695
3696 // See whether we can replace the select with X
3697 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
3698 for (auto Case : SI.cases())
3699 if (!CR.contains(Case.getCaseValue()->getValue()))
3700 return nullptr;
3701
3702 return X;
3703}
3704
3705 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
3706 Value *Cond = SI.getCondition();
3707 Value *Op0;
3708 ConstantInt *AddRHS;
3709 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3710 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3711 for (auto Case : SI.cases()) {
3712 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3713 assert(isa<ConstantInt>(NewCase) &&
3714 "Result of expression should be constant");
3715 Case.setValue(cast<ConstantInt>(NewCase));
3716 }
3717 return replaceOperand(SI, 0, Op0);
3718 }
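// E.g. (illustrative): 'switch (add i32 %x, 4)' with 'case 5' becomes
// 'switch i32 %x' with 'case 1', since the addition is undone on each case.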
3719
3720 ConstantInt *SubLHS;
3721 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
3722 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
3723 for (auto Case : SI.cases()) {
3724 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
3725 assert(isa<ConstantInt>(NewCase) &&
3726 "Result of expression should be constant");
3727 Case.setValue(cast<ConstantInt>(NewCase));
3728 }
3729 return replaceOperand(SI, 0, Op0);
3730 }
3731
3732 uint64_t ShiftAmt;
3733 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
3734 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
3735 all_of(SI.cases(), [&](const auto &Case) {
3736 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3737 })) {
3738 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
3739 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
3740 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
3741 Shl->hasOneUse()) {
3742 Value *NewCond = Op0;
3743 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
3744 // If the shift may wrap, we need to mask off the shifted bits.
3745 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
3746 NewCond = Builder.CreateAnd(
3747 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
3748 }
3749 for (auto Case : SI.cases()) {
3750 const APInt &CaseVal = Case.getCaseValue()->getValue();
3751 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
3752 : CaseVal.lshr(ShiftAmt);
3753 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3754 }
3755 return replaceOperand(SI, 0, NewCond);
3756 }
3757 }
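// E.g. (illustrative): 'switch (shl nuw i32 %x, 2)' with 'case 4' becomes
// 'switch i32 %x' with 'case 1'; without nuw/nsw the condition is first
// masked so that only bits surviving the shift are compared.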
3758
3759 // Fold switch(zext/sext(X)) into switch(X) if possible.
3760 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
3761 bool IsZExt = isa<ZExtInst>(Cond);
3762 Type *SrcTy = Op0->getType();
3763 unsigned NewWidth = SrcTy->getScalarSizeInBits();
3764
3765 if (all_of(SI.cases(), [&](const auto &Case) {
3766 const APInt &CaseVal = Case.getCaseValue()->getValue();
3767 return IsZExt ? CaseVal.isIntN(NewWidth)
3768 : CaseVal.isSignedIntN(NewWidth);
3769 })) {
3770 for (auto &Case : SI.cases()) {
3771 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3772 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3773 }
3774 return replaceOperand(SI, 0, Op0);
3775 }
3776 }
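// E.g. (illustrative): 'switch (zext i8 %x to i32)' whose case values all
// fit in 8 bits becomes 'switch i8 %x' with truncated case values.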
3777
3778 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
3779 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
3780 if (Value *V =
3781 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
3782 return replaceOperand(SI, 0, V);
3783 if (Value *V =
3784 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
3785 return replaceOperand(SI, 0, V);
3786 }
3787
3788 KnownBits Known = computeKnownBits(Cond, 0, &SI);
3789 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3790 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3791
3792 // Compute the number of leading bits we can ignore.
3793 // TODO: A better way to determine this would use ComputeNumSignBits().
3794 for (const auto &C : SI.cases()) {
3795 LeadingKnownZeros =
3796 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3797 LeadingKnownOnes =
3798 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3799 }
3800
3801 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3802
3803 // Shrink the condition operand if the new type is smaller than the old type.
3804 // But do not shrink to a non-standard type, because the backend can't generate
3805 // good code for that yet.
3806 // TODO: We can make it aggressive again after fixing PR39569.
3807 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3808 shouldChangeType(Known.getBitWidth(), NewWidth)) {
3809 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3810 Builder.SetInsertPoint(&SI);
3811 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3812
3813 for (auto Case : SI.cases()) {
3814 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3815 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3816 }
3817 return replaceOperand(SI, 0, NewCond);
3818 }
3819
3820 if (isa<UndefValue>(Cond)) {
3821 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3822 return nullptr;
3823 }
3824 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3825 handlePotentiallyDeadSuccessors(SI.getParent(),
3826 SI.findCaseValue(CI)->getCaseSuccessor());
3827 return nullptr;
3828 }
3829
3830 return nullptr;
3831}
3832
3833 Instruction *
3834 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3835 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3836 if (!WO)
3837 return nullptr;
3838
3839 Intrinsic::ID OvID = WO->getIntrinsicID();
3840 const APInt *C = nullptr;
3841 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
3842 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3843 OvID == Intrinsic::umul_with_overflow)) {
3844 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3845 if (C->isAllOnes())
3846 return BinaryOperator::CreateNeg(WO->getLHS());
3847 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
3848 if (C->isPowerOf2()) {
3849 return BinaryOperator::CreateShl(
3850 WO->getLHS(),
3851 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
3852 }
3853 }
3854 }
3855
3856 // We're extracting from an overflow intrinsic. See if we're the only user.
3857 // That allows us to simplify multiple result intrinsics to simpler things
3858 // that just get one value.
3859 if (!WO->hasOneUse())
3860 return nullptr;
3861
3862 // Check if we're grabbing only the result of a 'with overflow' intrinsic
3863 // and replace it with a traditional binary instruction.
3864 if (*EV.idx_begin() == 0) {
3865 Instruction::BinaryOps BinOp = WO->getBinaryOp();
3866 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
3867 // Replace the old instruction's uses with poison.
3868 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
3869 eraseInstFromFunction(*WO);
3870 return BinaryOperator::Create(BinOp, LHS, RHS);
3871 }
3872
3873 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
3874
3875 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
3876 if (OvID == Intrinsic::usub_with_overflow)
3877 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
3878
3879 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
3880 // +1 is not possible because we assume signed values.
3881 if (OvID == Intrinsic::smul_with_overflow &&
3882 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
3883 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
3884
3885 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
3886 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
3887 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
3888 // Only handle even bitwidths for performance reasons.
3889 if (BitWidth % 2 == 0)
3890 return new ICmpInst(
3891 ICmpInst::ICMP_UGT, WO->getLHS(),
3892 ConstantInt::get(WO->getLHS()->getType(),
3893 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
3894 }
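// E.g. (illustrative) for i32: umul.with.overflow(%x, %x) overflows exactly
// when %x u> 65535, i.e. when %x exceeds 2^16 - 1.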
3895
3896 // If only the overflow result is used, and the right hand side is a
3897 // constant (or constant splat), we can remove the intrinsic by directly
3898 // checking for overflow.
3899 if (C) {
3900 // Compute the no-wrap range for LHS given RHS=C, then construct an
3901 // equivalent icmp, potentially using an offset.
3902 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
3903 WO->getBinaryOp(), *C, WO->getNoWrapKind());
3904
3905 CmpInst::Predicate Pred;
3906 APInt NewRHSC, Offset;
3907 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
3908 auto *OpTy = WO->getRHS()->getType();
3909 auto *NewLHS = WO->getLHS();
3910 if (Offset != 0)
3911 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
3912 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
3913 ConstantInt::get(OpTy, NewRHSC));
3914 }
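// E.g. (illustrative): extracting only the overflow bit of
// sadd.with.overflow(i8 %x, i8 1) becomes 'icmp eq i8 %x, 127', since adding
// one overflows exactly for the maximum signed i8 value.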
3915
3916 return nullptr;
3917}
3918
3919 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
3920 Value *Agg = EV.getAggregateOperand();
3921
3922 if (!EV.hasIndices())
3923 return replaceInstUsesWith(EV, Agg);
3924
3925 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
3926 SQ.getWithInstruction(&EV)))
3927 return replaceInstUsesWith(EV, V);
3928
3929 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
3930 // We're extracting from an insertvalue instruction, compare the indices
3931 const unsigned *exti, *exte, *insi, *inse;
3932 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
3933 exte = EV.idx_end(), inse = IV->idx_end();
3934 exti != exte && insi != inse;
3935 ++exti, ++insi) {
3936 if (*insi != *exti)
3937 // The insert and extract both reference distinctly different elements.
3938 // This means the extract is not influenced by the insert, and we can
3939 // replace the aggregate operand of the extract with the aggregate
3940 // operand of the insert. i.e., replace
3941 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3942 // %E = extractvalue { i32, { i32 } } %I, 0
3943 // with
3944 // %E = extractvalue { i32, { i32 } } %A, 0
3945 return ExtractValueInst::Create(IV->getAggregateOperand(),
3946 EV.getIndices());
3947 }
3948 if (exti == exte && insi == inse)
3949 // Both iterators are at the end: Index lists are identical. Replace
3950 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3951 // %C = extractvalue { i32, { i32 } } %B, 1, 0
3952 // with "i32 42"
3953 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
3954 if (exti == exte) {
3955 // The extract list is a prefix of the insert list. i.e. replace
3956 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3957 // %E = extractvalue { i32, { i32 } } %I, 1
3958 // with
3959 // %X = extractvalue { i32, { i32 } } %A, 1
3960 // %E = insertvalue { i32 } %X, i32 42, 0
3961 // by switching the order of the insert and extract (though the
3962 // insertvalue should be left in, since it may have other uses).
3963 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
3964 EV.getIndices());
3965 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
3966 ArrayRef(insi, inse));
3967 }
3968 if (insi == inse)
3969 // The insert list is a prefix of the extract list
3970 // We can simply remove the common indices from the extract and make it
3971 // operate on the inserted value instead of the insertvalue result.
3972 // i.e., replace
3973 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3974 // %E = extractvalue { i32, { i32 } } %I, 1, 0
3975 // with
3976 // %E extractvalue { i32 } { i32 42 }, 0
3977 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
3978 ArrayRef(exti, exte));
3979 }
3980
3981 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3982 return R;
3983
3984 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3985 // Bail out if the aggregate contains scalable vector type
3986 if (auto *STy = dyn_cast<StructType>(Agg->getType());
3987 STy && STy->containsScalableVectorType())
3988 return nullptr;
3989
3990 // If the (non-volatile) load only has one use, we can rewrite this to a
3991 // load from a GEP. This reduces the size of the load. If a load is used
3992 // only by extractvalue instructions then this either must have been
3993 // optimized before, or it is a struct with padding, in which case we
3994 // don't want to do the transformation as it loses padding knowledge.
3995 if (L->isSimple() && L->hasOneUse()) {
3996 // extractvalue has integer indices, getelementptr has Value*s. Convert.
3997 SmallVector<Value*, 4> Indices;
3998 // Prefix an i32 0 since we need the first element.
3999 Indices.push_back(Builder.getInt32(0));
4000 for (unsigned Idx : EV.indices())
4001 Indices.push_back(Builder.getInt32(Idx));
4002
4003 // We need to insert these at the location of the old load, not at that of
4004 // the extractvalue.
4005 Builder.SetInsertPoint(L);
4006 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4007 L->getPointerOperand(), Indices);
4008 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4009 // Whatever aliasing information we had for the original load must also
4010 // hold for the smaller load, so propagate the annotations.
4011 NL->setAAMetadata(L->getAAMetadata());
4012 // Returning the load directly will cause the main loop to insert it in
4013 // the wrong spot, so use replaceInstUsesWith().
4014 return replaceInstUsesWith(EV, NL);
4015 }
4016 }
4017
4018 if (auto *PN = dyn_cast<PHINode>(Agg))
4019 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4020 return Res;
4021
4022 // Canonicalize extract (select Cond, TV, FV)
4023 // -> select cond, (extract TV), (extract FV)
4024 if (auto *SI = dyn_cast<SelectInst>(Agg))
4025 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4026 return R;
4027
4028 // We could simplify extracts from other values. Note that nested extracts may
4029 // already be simplified implicitly by the above: extract (extract (insert) )
4030 // will be translated into extract ( insert ( extract ) ) first and then just
4031 // the value inserted, if appropriate. Similarly for extracts from single-use
4032 // loads: extract (extract (load)) will be translated to extract (load (gep))
4033 // and if again single-use then via load (gep (gep)) to load (gep).
4034 // However, double extracts from e.g. function arguments or return values
4035 // aren't handled yet.
4036 return nullptr;
4037}
4038
4039/// Return 'true' if the given typeinfo will match anything.
4040static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4041 switch (Personality) {
4042 case EHPersonality::GNU_C:
4043 case EHPersonality::GNU_C_SjLj:
4044 case EHPersonality::Rust:
4045 // The GCC C EH and Rust personality only exists to support cleanups, so
4046 // it's not clear what the semantics of catch clauses are.
4047 return false;
4048 case EHPersonality::Unknown:
4049 return false;
4050 case EHPersonality::GNU_Ada:
4051 // While __gnat_all_others_value will match any Ada exception, it doesn't
4052 // match foreign exceptions (or didn't, before gcc-4.7).
4053 return false;
4054 case EHPersonality::GNU_CXX:
4055 case EHPersonality::GNU_CXX_SjLj:
4056 case EHPersonality::GNU_ObjC:
4057 case EHPersonality::MSVC_X86SEH:
4058 case EHPersonality::MSVC_TableSEH:
4059 case EHPersonality::MSVC_CXX:
4060 case EHPersonality::CoreCLR:
4061 case EHPersonality::Wasm_CXX:
4062 case EHPersonality::XL_CXX:
4063 case EHPersonality::ZOS_CXX:
4064 return TypeInfo->isNullValue();
4065 }
4066 llvm_unreachable("invalid enum");
4067}
4068
4069static bool shorter_filter(const Value *LHS, const Value *RHS) {
4070 return
4071 cast<ArrayType>(LHS->getType())->getNumElements()
4072 <
4073 cast<ArrayType>(RHS->getType())->getNumElements();
4074}
4075
4076 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4077 // The logic here should be correct for any real-world personality function.
4078 // However if that turns out not to be true, the offending logic can always
4079 // be conditioned on the personality function, like the catch-all logic is.
4080 EHPersonality Personality =
4081 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4082
4083 // Simplify the list of clauses, eg by removing repeated catch clauses
4084 // (these are often created by inlining).
4085 bool MakeNewInstruction = false; // If true, recreate using the following:
4086 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4087 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4088
4089 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4090 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4091 bool isLastClause = i + 1 == e;
4092 if (LI.isCatch(i)) {
4093 // A catch clause.
4094 Constant *CatchClause = LI.getClause(i);
4095 Constant *TypeInfo = CatchClause->stripPointerCasts();
4096
4097 // If we already saw this clause, there is no point in having a second
4098 // copy of it.
4099 if (AlreadyCaught.insert(TypeInfo).second) {
4100 // This catch clause was not already seen.
4101 NewClauses.push_back(CatchClause);
4102 } else {
4103 // Repeated catch clause - drop the redundant copy.
4104 MakeNewInstruction = true;
4105 }
4106
4107 // If this is a catch-all then there is no point in keeping any following
4108 // clauses or marking the landingpad as having a cleanup.
4109 if (isCatchAll(Personality, TypeInfo)) {
4110 if (!isLastClause)
4111 MakeNewInstruction = true;
4112 CleanupFlag = false;
4113 break;
4114 }
4115 } else {
4116 // A filter clause. If any of the filter elements were already caught
4117 // then they can be dropped from the filter. It is tempting to try to
4118 // exploit the filter further by saying that any typeinfo that does not
4119 // occur in the filter can't be caught later (and thus can be dropped).
4120 // However this would be wrong, since typeinfos can match without being
4121 // equal (for example if one represents a C++ class, and the other some
4122 // class derived from it).
4123 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4124 Constant *FilterClause = LI.getClause(i);
4125 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4126 unsigned NumTypeInfos = FilterType->getNumElements();
4127
4128 // An empty filter catches everything, so there is no point in keeping any
4129 // following clauses or marking the landingpad as having a cleanup. By
4130 // dealing with this case here the following code is made a bit simpler.
4131 if (!NumTypeInfos) {
4132 NewClauses.push_back(FilterClause);
4133 if (!isLastClause)
4134 MakeNewInstruction = true;
4135 CleanupFlag = false;
4136 break;
4137 }
4138
4139 bool MakeNewFilter = false; // If true, make a new filter.
4140 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4141 if (isa<ConstantAggregateZero>(FilterClause)) {
4142 // Not an empty filter - it contains at least one null typeinfo.
4143 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4144 Constant *TypeInfo =
4145 Constant::getNullValue(FilterType->getElementType());
4146 // If this typeinfo is a catch-all then the filter can never match.
4147 if (isCatchAll(Personality, TypeInfo)) {
4148 // Throw the filter away.
4149 MakeNewInstruction = true;
4150 continue;
4151 }
4152
4153 // There is no point in having multiple copies of this typeinfo, so
4154 // discard all but the first copy if there is more than one.
4155 NewFilterElts.push_back(TypeInfo);
4156 if (NumTypeInfos > 1)
4157 MakeNewFilter = true;
4158 } else {
4159 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4160 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4161 NewFilterElts.reserve(NumTypeInfos);
4162
4163 // Remove any filter elements that were already caught or that already
4164 // occurred in the filter. While there, see if any of the elements are
4165 // catch-alls. If so, the filter can be discarded.
4166 bool SawCatchAll = false;
4167 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4168 Constant *Elt = Filter->getOperand(j);
4169 Constant *TypeInfo = Elt->stripPointerCasts();
4170 if (isCatchAll(Personality, TypeInfo)) {
4171 // This element is a catch-all. Bail out, noting this fact.
4172 SawCatchAll = true;
4173 break;
4174 }
4175
4176 // Even if we've seen a type in a catch clause, we don't want to
4177 // remove it from the filter. An unexpected type handler may be
4178 // set up for a call site which throws an exception of the same
4179 // type caught. In order for the exception thrown by the unexpected
4180 // handler to propagate correctly, the filter must be correctly
4181 // described for the call site.
4182 //
4183 // Example:
4184 //
4185 // void unexpected() { throw 1;}
4186 // void foo() throw (int) {
4187 // std::set_unexpected(unexpected);
4188 // try {
4189 // throw 2.0;
4190 // } catch (int i) {}
4191 // }
4192
4193 // There is no point in having multiple copies of the same typeinfo in
4194 // a filter, so only add it if we didn't already.
4195 if (SeenInFilter.insert(TypeInfo).second)
4196 NewFilterElts.push_back(cast<Constant>(Elt));
4197 }
4198 // A filter containing a catch-all cannot match anything by definition.
4199 if (SawCatchAll) {
4200 // Throw the filter away.
4201 MakeNewInstruction = true;
4202 continue;
4203 }
4204
4205 // If we dropped something from the filter, make a new one.
4206 if (NewFilterElts.size() < NumTypeInfos)
4207 MakeNewFilter = true;
4208 }
4209 if (MakeNewFilter) {
4210 FilterType = ArrayType::get(FilterType->getElementType(),
4211 NewFilterElts.size());
4212 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4213 MakeNewInstruction = true;
4214 }
4215
4216 NewClauses.push_back(FilterClause);
4217
4218 // If the new filter is empty then it will catch everything so there is
4219 // no point in keeping any following clauses or marking the landingpad
4220 // as having a cleanup. The case of the original filter being empty was
4221 // already handled above.
4222 if (MakeNewFilter && !NewFilterElts.size()) {
4223 assert(MakeNewInstruction && "New filter but not a new instruction!");
4224 CleanupFlag = false;
4225 break;
4226 }
4227 }
4228 }
4229
4230 // If several filters occur in a row then reorder them so that the shortest
4231 // filters come first (those with the smallest number of elements). This is
4232 // advantageous because shorter filters are more likely to match, speeding up
4233 // unwinding, but mostly because it increases the effectiveness of the other
4234 // filter optimizations below.
4235 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4236 unsigned j;
4237 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4238 for (j = i; j != e; ++j)
4239 if (!isa<ArrayType>(NewClauses[j]->getType()))
4240 break;
4241
4242 // Check whether the filters are already sorted by length. We need to know
4243 // if sorting them is actually going to do anything so that we only make a
4244 // new landingpad instruction if it does.
4245 for (unsigned k = i; k + 1 < j; ++k)
4246 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4247 // Not sorted, so sort the filters now. Doing an unstable sort would be
4248 // correct too but reordering filters pointlessly might confuse users.
4249 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4250 shorter_filter);
4251 MakeNewInstruction = true;
4252 break;
4253 }
4254
4255 // Look for the next batch of filters.
4256 i = j + 1;
4257 }
4258
4259 // If typeinfos matched if and only if equal, then the elements of a filter L
4260 // that occurs later than a filter F could be replaced by the intersection of
4261 // the elements of F and L. In reality two typeinfos can match without being
4262 // equal (for example if one represents a C++ class, and the other some class
4263 // derived from it) so it would be wrong to perform this transform in general.
4264 // However the transform is correct and useful if F is a subset of L. In that
4265 // case L can be replaced by F, and thus removed altogether since repeating a
4266 // filter is pointless. So here we look at all pairs of filters F and L where
4267 // L follows F in the list of clauses, and remove L if every element of F is
4268 // an element of L. This can occur when inlining C++ functions with exception
4269 // specifications.
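// E.g. (illustrative, with hypothetical typeinfos @A and @B):
// 'filter [1 x ptr] [ptr @A]' followed later by
// 'filter [2 x ptr] [ptr @A, ptr @B]' allows the second filter to be
// dropped, since every element of the first occurs in the second.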
4270 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4271 // Examine each filter in turn.
4272 Value *Filter = NewClauses[i];
4273 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4274 if (!FTy)
4275 // Not a filter - skip it.
4276 continue;
4277 unsigned FElts = FTy->getNumElements();
4278 // Examine each filter following this one. Doing this backwards means that
4279 // we don't have to worry about filters disappearing under us when removed.
4280 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4281 Value *LFilter = NewClauses[j];
4282 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4283 if (!LTy)
4284 // Not a filter - skip it.
4285 continue;
4286 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4287 // an element of LFilter, then discard LFilter.
4288 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4289 // If Filter is empty then it is a subset of LFilter.
4290 if (!FElts) {
4291 // Discard LFilter.
4292 NewClauses.erase(J);
4293 MakeNewInstruction = true;
4294 // Move on to the next filter.
4295 continue;
4296 }
4297 unsigned LElts = LTy->getNumElements();
4298 // If Filter is longer than LFilter then it cannot be a subset of it.
4299 if (FElts > LElts)
4300 // Move on to the next filter.
4301 continue;
4302 // At this point we know that LFilter has at least one element.
4303 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4304 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4305 // already know that Filter is not longer than LFilter).
4306 if (isa<ConstantAggregateZero>(Filter)) {
4307 assert(FElts <= LElts && "Should have handled this case earlier!");
4308 // Discard LFilter.
4309 NewClauses.erase(J);
4310 MakeNewInstruction = true;
4311 }
4312 // Move on to the next filter.
4313 continue;
4314 }
4315 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4316 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4317 // Since Filter is non-empty and contains only zeros, it is a subset of
4318 // LFilter iff LFilter contains a zero.
4319 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4320 for (unsigned l = 0; l != LElts; ++l)
4321 if (LArray->getOperand(l)->isNullValue()) {
4322 // LFilter contains a zero - discard it.
4323 NewClauses.erase(J);
4324 MakeNewInstruction = true;
4325 break;
4326 }
4327 // Move on to the next filter.
4328 continue;
4329 }
4330 // At this point we know that both filters are ConstantArrays. Loop over
4331 // operands to see whether every element of Filter is also an element of
4332 // LFilter. Since filters tend to be short this is probably faster than
4333 // using a method that scales nicely.
4334 ConstantArray *FArray = cast<ConstantArray>(Filter);
4335 bool AllFound = true;
4336 for (unsigned f = 0; f != FElts; ++f) {
4337 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4338 AllFound = false;
4339 for (unsigned l = 0; l != LElts; ++l) {
4340 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4341 if (LTypeInfo == FTypeInfo) {
4342 AllFound = true;
4343 break;
4344 }
4345 }
4346 if (!AllFound)
4347 break;
4348 }
4349 if (AllFound) {
4350 // Discard LFilter.
4351 NewClauses.erase(J);
4352 MakeNewInstruction = true;
4353 }
4354 // Move on to the next filter.
4355 }
4356 }
4357
4358 // If we changed any of the clauses, replace the old landingpad instruction
4359 // with a new one.
4360 if (MakeNewInstruction) {
4361 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
4362 NewClauses.size());
4363 for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
4364 NLI->addClause(NewClauses[i]);
4365 // A landing pad with no clauses must have the cleanup flag set. It is
4366 // theoretically possible, though highly unlikely, that we eliminated all
4367 // clauses. If so, force the cleanup flag to true.
4368 if (NewClauses.empty())
4369 CleanupFlag = true;
4370 NLI->setCleanup(CleanupFlag);
4371 return NLI;
4372 }
4373
4374 // Even if none of the clauses changed, we may nonetheless have understood
4375 // that the cleanup flag is pointless. Clear it if so.
4376 if (LI.isCleanup() != CleanupFlag) {
4377 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4378 LI.setCleanup(CleanupFlag);
4379 return &LI;
4380 }
4381
4382 return nullptr;
4383}
4384
4385Value *
4386 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
4387 // Try to push freeze through instructions that propagate but don't produce
4388 // poison as far as possible. If an operand of freeze satisfies three
4389 // conditions: 1) it has one use, 2) it does not produce poison, and 3) all
4390 // but one of its operands are guaranteed non-poison, then push the freeze
4391 // through to the one operand that is not guaranteed non-poison. The actual
4392 // transform is as follows.
4393 // Op1 = ... ; Op1 can be poison
4394 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and all its
4395 // ; other operands are guaranteed non-poison
4396 // ... = Freeze(Op0)
4397 // =>
4398 // Op1 = ...
4399 // Op1.fr = Freeze(Op1)
4400 // ... = Inst(Op1.fr, NonPoisonOps...)
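// Illustrative instance (not from the source):
//   %op0 = or i32 %op1, 1      ; %op1 may be poison; the constant never is
//   %f = freeze i32 %op0
// becomes
//   %op1.fr = freeze i32 %op1
//   %op0 = or i32 %op1.fr, 1   ; no freeze of %op0 needed any more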
4401 auto *OrigOp = OrigFI.getOperand(0);
4402 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4403
4404 // While we could change the other users of OrigOp to use freeze(OrigOp), that
4405 // potentially reduces their optimization potential, so let's only do this iff
4406 // the OrigOp is only used by the freeze.
4407 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4408 return nullptr;
4409
4410 // We can't push the freeze through an instruction which can itself create
4411 // poison. If the only source of new poison is flags, we can simply
4412 // strip them (since we know the only use is the freeze and nothing can
4413 // benefit from them.)
4414 if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
4415 /*ConsiderFlagsAndMetadata*/ false))
4416 return nullptr;
4417
4418 // If operand is guaranteed not to be poison, there is no need to add freeze
4419 // to the operand. So we first find the operand that is not guaranteed to be
4420 // poison.
4421 Use *MaybePoisonOperand = nullptr;
4422 for (Use &U : OrigOpInst->operands()) {
4423 if (isa<MetadataAsValue>(U.get()) ||
4424 isGuaranteedNotToBeUndefOrPoison(U.get()))
4425 continue;
4426 if (!MaybePoisonOperand)
4427 MaybePoisonOperand = &U;
4428 else
4429 return nullptr;
4430 }
4431
4432 OrigOpInst->dropPoisonGeneratingAnnotations();
4433
4434 // If all operands are guaranteed to be non-poison, we can drop freeze.
4435 if (!MaybePoisonOperand)
4436 return OrigOp;
4437
4438 Builder.SetInsertPoint(OrigOpInst);
4439 auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
4440 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
4441
4442 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4443 return OrigOp;
4444}
4445
4446 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
4447 PHINode *PN) {
4448 // Detect whether this is a recurrence with a start value and some number of
4449 // backedge values. We'll check whether we can push the freeze through the
4450 // backedge values (possibly dropping poison flags along the way) until we
4451 // reach the phi again. In that case, we can move the freeze to the start
4452 // value.
4453 Use *StartU = nullptr;
4454 SmallVector<Value *> Worklist;
4455 for (Use &U : PN->incoming_values()) {
4456 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
4457 // Add backedge value to worklist.
4458 Worklist.push_back(U.get());
4459 continue;
4460 }
4461
4462 // Don't bother handling multiple start values.
4463 if (StartU)
4464 return nullptr;
4465 StartU = &U;
4466 }
4467
4468 if (!StartU || Worklist.empty())
4469 return nullptr; // Not a recurrence.
4470
4471 Value *StartV = StartU->get();
4472 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
4473 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
4474 // We can't insert freeze if the start value is the result of the
4475 // terminator (e.g. an invoke).
4476 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
4477 return nullptr;
4478
4479 SmallPtrSet<Value *, 32> Visited;
4480 SmallVector<Instruction *> DropFlags;
4481 while (!Worklist.empty()) {
4482 Value *V = Worklist.pop_back_val();
4483 if (!Visited.insert(V).second)
4484 continue;
4485
4486 if (Visited.size() > 32)
4487 return nullptr; // Limit the total number of values we inspect.
4488
4489 // Assume that PN is non-poison, because it will be after the transform.
4490 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
4491 continue;
4492
4493 Instruction *I = dyn_cast<Instruction>(V);
4494 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
4495 /*ConsiderFlagsAndMetadata*/ false))
4496 return nullptr;
4497
4498 DropFlags.push_back(I);
4499 append_range(Worklist, I->operands());
4500 }
4501
4502 for (Instruction *I : DropFlags)
4503 I->dropPoisonGeneratingAnnotations();
4504
4505 if (StartNeedsFreeze) {
4506 Builder.SetInsertPoint(StartBB->getTerminator());
4507 Value *FrozenStartV = Builder.CreateFreeze(StartV,
4508 StartV->getName() + ".fr");
4509 replaceUse(*StartU, FrozenStartV);
4510 }
4511 return replaceInstUsesWith(FI, PN);
4512}
4513
4514 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
4515 Value *Op = FI.getOperand(0);
4516
4517 if (isa<Constant>(Op) || Op->hasOneUse())
4518 return false;
4519
4520 // Move the freeze directly after the definition of its operand, so that
4521 // it dominates the maximum number of uses. Note that it may not dominate
4522 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
4523 // the normal/default destination. This is why the domination check in the
4524 // replacement below is still necessary.
4525 BasicBlock::iterator MoveBefore;
4526 if (isa<Argument>(Op)) {
4527 MoveBefore =
4528 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
4529 } else {
4530 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
4531 if (!MoveBeforeOpt)
4532 return false;
4533 MoveBefore = *MoveBeforeOpt;
4534 }
4535
4536 // Don't move to the position of a debug intrinsic.
4537 if (isa<DbgInfoIntrinsic>(MoveBefore))
4538 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4539 // Re-point iterator to come after any debug-info records, if we're
4540 // running in "RemoveDIs" mode
4541 MoveBefore.setHeadBit(false);
4542
4543 bool Changed = false;
4544 if (&FI != &*MoveBefore) {
4545 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
4546 Changed = true;
4547 }
4548
4549 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
4550 bool Dominates = DT.dominates(&FI, U);
4551 Changed |= Dominates;
4552 return Dominates;
4553 });
4554
4555 return Changed;
4556}
4557
4558// Check if any direct or bitcast user of this value is a shuffle instruction.
4559 static bool isUsedWithinShuffleVector(Value *V) {
4560 for (auto *U : V->users()) {
4561 if (isa<ShuffleVectorInst>(U))
4562 return true;
4563 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
4564 return true;
4565 }
4566 return false;
4567}
4568
4569 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
4570 Value *Op0 = I.getOperand(0);
4571
4572 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
4573 return replaceInstUsesWith(I, V);
4574
4575 // freeze (phi const, x) --> phi const, (freeze x)
4576 if (auto *PN = dyn_cast<PHINode>(Op0)) {
4577 if (Instruction *NV = foldOpIntoPhi(I, PN))
4578 return NV;
4579 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
4580 return NV;
4581 }
4582
4583 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
4584 return replaceInstUsesWith(I, NI);
4585
4586 // If I is freeze(undef), check its uses and fold it to a fixed constant.
4587 // - or: pick -1
4588 // - select's condition: if the true value is constant, choose it by making
4589 // the condition true.
4590 // - default: pick 0
4591 //
4592 // Note that this transform is intentionally done here rather than
4593 // via an analysis in InstSimplify or at individual user sites. That is
4594 // because we must produce the same value for all uses of the freeze -
4595 // it's the reason "freeze" exists!
4596 //
4597 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
4598 // duplicating logic for binops at least.
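// E.g. (illustrative): if freeze(undef) feeds only 'or %f, %x', picking
// all-ones lets the 'or' fold to a constant; if it is only used as a select
// condition whose true arm is constant, picking 'true' does the same.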
4599 auto getUndefReplacement = [&I](Type *Ty) {
4600 Constant *BestValue = nullptr;
4601 Constant *NullValue = Constant::getNullValue(Ty);
4602 for (const auto *U : I.users()) {
4603 Constant *C = NullValue;
4604 if (match(U, m_Or(m_Value(), m_Value())))
4605 C = Constant::getAllOnesValue(Ty);
4606 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4607 C = ConstantInt::getTrue(Ty);
4608
4609 if (!BestValue)
4610 BestValue = C;
4611 else if (BestValue != C)
4612 BestValue = NullValue;
4613 }
4614 assert(BestValue && "Must have at least one use");
4615 return BestValue;
4616 };
4617
4618 if (match(Op0, m_Undef())) {
4619 // Don't fold freeze(undef/poison) if it's used as a vector operand in
4620 // a shuffle. This may improve codegen for shuffles that allow
4621 // unspecified inputs.
4622 if (isUsedWithinShuffleVector(&I))
4623 return nullptr;
4624 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4625 }
4626
4627 Constant *C;
4628 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4629 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
4630 return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
4631 }
4632
4633 // Replace uses of Op with freeze(Op).
4634 if (freezeOtherUses(I))
4635 return &I;
4636
4637 return nullptr;
4638}
4639
4640/// Check for case where the call writes to an otherwise dead alloca. This
4641/// shows up for unused out-params in idiomatic C/C++ code. Note that this
4642/// helper *only* analyzes the write; doesn't check any other legality aspect.
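/// Illustrative C idiom (not from the source; compute() is hypothetical):
///   int unused;
///   compute(&unused, input);  // writes only through its first argument
/// Here the call's sole memory effect is a store into a dead local.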
4643 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
4644 auto *CB = dyn_cast<CallBase>(I);
4645 if (!CB)
4646 // TODO: handle e.g. store to alloca here - only worth doing if we extend
4647 // to allow reload along used path as described below. Otherwise, this
4648 // is simply a store to a dead allocation which will be removed.
4649 return false;
4650 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4651 if (!Dest)
4652 return false;
4653 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4654 if (!AI)
4655 // TODO: allow malloc?
4656 return false;
4657 // TODO: allow memory access dominated by move point? Note that since AI
4658 // could have a reference to itself captured by the call, we would need to
4659 // account for cycles in doing so.
4660 SmallVector<const User *> AllocaUsers;
4661 SmallPtrSet<const User *, 4> Visited;
4662 auto pushUsers = [&](const Instruction &I) {
4663 for (const User *U : I.users()) {
4664 if (Visited.insert(U).second)
4665 AllocaUsers.push_back(U);
4666 }
4667 };
4668 pushUsers(*AI);
4669 while (!AllocaUsers.empty()) {
4670 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4671 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
4672 isa<AddrSpaceCastInst>(UserI)) {
4673 pushUsers(*UserI);
4674 continue;
4675 }
4676 if (UserI == CB)
4677 continue;
4678 // TODO: support lifetime.start/end here
4679 return false;
4680 }
4681 return true;
4682}
4683
4684/// Try to move the specified instruction from its current block into the
4685/// beginning of DestBlock, which can only happen if it's safe to move the
4686/// instruction past all of the instructions between it and the end of its
4687/// block.
4688bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
4689 BasicBlock *DestBlock) {
4690 BasicBlock *SrcBlock = I->getParent();
4691
4692 // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
4693 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4694 I->isTerminator())
4695 return false;
4696
4697 // Do not sink static or dynamic alloca instructions. Static allocas must
4698 // remain in the entry block, and dynamic allocas must not be sunk in between
4699 // a stacksave / stackrestore pair, which would incorrectly shorten the
4700 // alloca's lifetime.
4701 if (isa<AllocaInst>(I))
4702 return false;
4703
4704 // Do not sink into catchswitch blocks.
4705 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4706 return false;
4707
4708 // Do not sink convergent call instructions.
4709 if (auto *CI = dyn_cast<CallInst>(I)) {
4710 if (CI->isConvergent())
4711 return false;
4712 }
4713
4714 // Unless we can prove that the memory write isn't visible except on the
4715 // path we're sinking to, we must bail.
4716 if (I->mayWriteToMemory()) {
4717 if (!SoleWriteToDeadLocal(I, TLI))
4718 return false;
4719 }
4720
4721 // We can only sink load instructions if there is nothing between the load and
4722 // the end of the block that could change the value.
4723 if (I->mayReadFromMemory()) {
4724 // We don't want to do any sophisticated alias analysis, so we only check
4725 // the instructions after I in I's parent block if we try to sink to its
4726 // successor block.
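  // Illustrative example: %v below may sink into %use only because %use's
  // unique predecessor is %bb and nothing after the load may write memory:
  //   bb:
  //     %v = load i32, ptr %p
  //     br label %use       ; no store between the load and the branch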
4727 if (DestBlock->getUniquePredecessor() != I->getParent())
4728 return false;
4729 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4730 E = I->getParent()->end();
4731 Scan != E; ++Scan)
4732 if (Scan->mayWriteToMemory())
4733 return false;
4734 }
4735
4736 I->dropDroppableUses([&](const Use *U) {
4737 auto *I = dyn_cast<Instruction>(U->getUser());
4738 if (I && I->getParent() != DestBlock) {
4739 Worklist.add(I);
4740 return true;
4741 }
4742 return false;
4743 });
4744 /// FIXME: We could remove droppable uses that are not dominated by
4745 /// the new position.
4746
4747 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4748 I->moveBefore(*DestBlock, InsertPos);
4749 ++NumSunkInst;
4750
4751 // Also sink all related debug uses from the source basic block. Otherwise we
4752 // get a debug use before the def. Attempt to salvage debug uses first, to
4753 // maximise the range over which variables have a location. If we cannot
4754 // salvage, then mark the location undef: we know it was supposed to receive
4755 // a new location here, but that computation has been sunk.
4756 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
4757 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
4758 findDbgUsers(DbgUsers, I, &DbgVariableRecords);
4759 if (!DbgUsers.empty())
4760 tryToSinkInstructionDbgValues(I, InsertPos, SrcBlock, DestBlock, DbgUsers);
4761 if (!DbgVariableRecords.empty())
4762 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
4763 DbgVariableRecords);
4764
4765 // PS: there are numerous flaws with this behaviour, not least that right now
4766 // assignments can be re-ordered past other assignments to the same variable
4767 // if they use different Values. Creating more undef assignments can never be
4768 // undone. And salvaging all users outside of this block can unnecessarily
4769 // alter the lifetime of the live-value that the variable refers to.
4770 // Some of these things can be resolved by tolerating debug use-before-defs in
4771 // LLVM-IR; however, that depends on the instruction-referencing CodeGen
4772 // backend being used for more architectures.
4773
4774 return true;
4775}
4776
4777void InstCombinerImpl::tryToSinkInstructionDbgValues(
4778 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4779 BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers) {
4780 // For all debug values in the destination block, the sunk instruction
4781 // will still be available, so they do not need to be dropped.
4782 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
4783 for (auto &DbgUser : DbgUsers)
4784 if (DbgUser->getParent() != DestBlock)
4785 DbgUsersToSalvage.push_back(DbgUser);
4786
4787 // Process the sinking DbgUsersToSalvage in reverse order, as we only want
4788 // to clone the last appearing debug intrinsic for each given variable.
4789 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
4790 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4791 if (DVI->getParent() == SrcBlock)
4792 DbgUsersToSink.push_back(DVI);
4793 llvm::sort(DbgUsersToSink,
4794 [](auto *A, auto *B) { return B->comesBefore(A); });
4795
4796 SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
4797 SmallSet<DebugVariable, 4> SunkVariables;
4798 for (auto *User : DbgUsersToSink) {
4799 // A dbg.declare instruction should not be cloned, since there can only be
4800 // one per variable fragment. It should be left in the original place
4801 // because the sunk instruction is not an alloca (otherwise we could not be
4802 // here).
4803 if (isa<DbgDeclareInst>(User))
4804 continue;
4805
4806 DebugVariable DbgUserVariable =
4807 DebugVariable(User->getVariable(), User->getExpression(),
4808 User->getDebugLoc()->getInlinedAt());
4809
4810 if (!SunkVariables.insert(DbgUserVariable).second)
4811 continue;
4812
4813 // Leave dbg.assign intrinsics in their original positions; there should
4814 // be no need to insert a clone.
4815 if (isa<DbgAssignIntrinsic>(User))
4816 continue;
4817
4818 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
4819 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
4820 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
4821 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
4822 }
4823
4824 // Perform salvaging without the clones, then sink the clones.
4825 if (!DIIClones.empty()) {
4826 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, {});
4827 // The clones are in reverse order of original appearance, reverse again to
4828 // maintain the original order.
4829 for (auto &DIIClone : llvm::reverse(DIIClones)) {
4830 DIIClone->insertBefore(&*InsertPos);
4831 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
4832 }
4833 }
4834}
4835
4836void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
4837 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4838 BasicBlock *DestBlock,
4839 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
4840 // Implementation of tryToSinkInstructionDbgValues, but for the
4841 // DbgVariableRecord of variable assignments rather than dbg.values.
4842
4843 // Fetch all DbgVariableRecords not already in the destination.
4844 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
4845 for (auto &DVR : DbgVariableRecords)
4846 if (DVR->getParent() != DestBlock)
4847 DbgVariableRecordsToSalvage.push_back(DVR);
4848
4849 // Fetch a second collection, of DbgVariableRecords in the source block that
4850 // we're going to sink.
4851 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
4852 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
4853 if (DVR->getParent() == SrcBlock)
4854 DbgVariableRecordsToSink.push_back(DVR);
4855
4856 // Sort DbgVariableRecords according to their position in the block. This is a
4857 // partial order: DbgVariableRecords attached to different instructions will
4858 // be ordered by the instruction order, but DbgVariableRecords attached to the
4859 // same instruction won't have an order.
4860 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
4861 return B->getInstruction()->comesBefore(A->getInstruction());
4862 };
4863 llvm::stable_sort(DbgVariableRecordsToSink, Order);
4864
4865 // If there are two assignments to the same variable attached to the same
4866 // instruction, the ordering between the two assignments is important. Scan
4867 // for this (rare) case and establish which is the last assignment.
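  // Illustrative sketch: two records for the same variable !v attached to one
  // instruction; only the later (last) assignment may be cloned and sunk:
  //   %x = add i32 %a, %b
  //     #dbg_value(i32 %x, !v, ...)
  //     #dbg_value(i32 0, !v, ...)   ; the last assignment wins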
4868 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
4869 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
4870 if (DbgVariableRecordsToSink.size() > 1) {
4871 SmallDenseMap<InstVarPair, unsigned> CountMap;
4872 // Count how many assignments to each variable there is per instruction.
4873 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
4874 DebugVariable DbgUserVariable =
4875 DebugVariable(DVR->getVariable(), DVR->getExpression(),
4876 DVR->getDebugLoc()->getInlinedAt());
4877 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
4878 }
4879
4880 // If there are any instructions with two assignments, add them to the
4881 // FilterOutMap to record that they need extra filtering.
4882 SmallSet<const Instruction *, 4> DupSet;
4883 for (auto It : CountMap) {
4884 if (It.second > 1) {
4885 FilterOutMap[It.first] = nullptr;
4886 DupSet.insert(It.first.first);
4887 }
4888 }
4889
4890 // For all instruction/variable pairs needing extra filtering, find the
4891 // latest assignment.
4892 for (const Instruction *Inst : DupSet) {
4893 for (DbgVariableRecord &DVR :
4894 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
4895 DebugVariable DbgUserVariable =
4896 DebugVariable(DVR.getVariable(), DVR.getExpression(),
4897 DVR.getDebugLoc()->getInlinedAt());
4898 auto FilterIt =
4899 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
4900 if (FilterIt == FilterOutMap.end())
4901 continue;
4902 if (FilterIt->second != nullptr)
4903 continue;
4904 FilterIt->second = &DVR;
4905 }
4906 }
4907 }
4908
4909 // Perform cloning of the DbgVariableRecords that we plan on sinking; filter
4910 // out any duplicate assignments identified above.
4911 SmallVector<DbgVariableRecord *, 2> DVRClones;
4912 SmallSet<DebugVariable, 4> SunkVariables;
4913 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
4914 if (DVR->Type == DbgVariableRecord::LocationType::Declare)
4915 continue;
4916
4917 DebugVariable DbgUserVariable =
4918 DebugVariable(DVR->getVariable(), DVR->getExpression(),
4919 DVR->getDebugLoc()->getInlinedAt());
4920
4921 // For any variable where there were multiple assignments in the same place,
4922 // ignore all but the last assignment.
4923 if (!FilterOutMap.empty()) {
4924 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
4925 auto It = FilterOutMap.find(IVP);
4926
4927 // Filter out.
4928 if (It != FilterOutMap.end() && It->second != DVR)
4929 continue;
4930 }
4931
4932 if (!SunkVariables.insert(DbgUserVariable).second)
4933 continue;
4934
4935 if (DVR->isDbgAssign())
4936 continue;
4937
4938 DVRClones.emplace_back(DVR->clone());
4939 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
4940 }
4941
4942 // Perform salvaging without the clones, then sink the clones.
4943 if (DVRClones.empty())
4944 return;
4945
4946 salvageDebugInfoForDbgValues(*I, {}, DbgVariableRecordsToSalvage);
4947
4948 // The clones are in reverse order of original appearance. Assert that the
4949 // head bit is set on the iterator as we _should_ have received it via
4950 // getFirstInsertionPt. Inserting like this will reverse the clone order as
4951 // we'll repeatedly insert at the head, such as:
4952 // DVR-3 (third insertion goes here)
4953 // DVR-2 (second insertion goes here)
4954 // DVR-1 (first insertion goes here)
4955 // Any-Prior-DVRs
4956 // InsertPtInst
4957 assert(InsertPos.getHeadBit());
4958 for (DbgVariableRecord *DVRClone : DVRClones) {
4959 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
4960 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
4961 }
4962}
4963
4964bool InstCombinerImpl::run() {
4965 while (!Worklist.isEmpty()) {
4966 // Walk deferred instructions in reverse order, and push them to the
4967 // worklist, which means they'll end up popped from the worklist in-order.
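    // Illustrative example: deferred instructions [A, B, C] (in deferral
    // order) are pushed as C, then B, then A, so the LIFO worklist pops and
    // visits A first, i.e. in the original order.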
4968 while (Instruction *I = Worklist.popDeferred()) {
4969 // Check to see if we can DCE the instruction. We do this already here to
4970 // reduce the number of uses and thus allow other folds to trigger.
4971 // Note that eraseInstFromFunction() may push additional instructions on
4972 // the deferred worklist, so this will DCE whole instruction chains.
4973 if (isInstructionTriviallyDead(I, &TLI)) {
4974 eraseInstFromFunction(*I);
4975 ++NumDeadInst;
4976 continue;
4977 }
4978
4979 Worklist.push(I);
4980 }
4981
4982 Instruction *I = Worklist.removeOne();
4983 if (I == nullptr) continue; // skip null values.
4984
4985 // Check to see if we can DCE the instruction.
4986 if (isInstructionTriviallyDead(I, &TLI)) {
4987 eraseInstFromFunction(*I);
4988 ++NumDeadInst;
4989 continue;
4990 }
4991
4992 if (!DebugCounter::shouldExecute(VisitCounter))
4993 continue;
4994
4995 // See if we can trivially sink this instruction to its user if we can
4996 // prove that the successor is not executed more frequently than our block.
4997 // Return the UserBlock if successful.
4998 auto getOptionalSinkBlockForInst =
4999 [this](Instruction *I) -> std::optional<BasicBlock *> {
5000 if (!EnableCodeSinking)
5001 return std::nullopt;
5002
5003 BasicBlock *BB = I->getParent();
5004 BasicBlock *UserParent = nullptr;
5005 unsigned NumUsers = 0;
5006
5007 for (Use &U : I->uses()) {
5008 User *User = U.getUser();
5009 if (User->isDroppable())
5010 continue;
5011 if (NumUsers > MaxSinkNumUsers)
5012 return std::nullopt;
5013
5014 Instruction *UserInst = cast<Instruction>(User);
5015 // Special handling for Phi nodes - get the block the use occurs in.
5016 BasicBlock *UserBB = UserInst->getParent();
5017 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5018 UserBB = PN->getIncomingBlock(U);
5019 // Bail out if we have uses in different blocks. We don't do any
5020 // sophisticated analysis (e.g., finding the NearestCommonDominator of these
5021 // use blocks).
5022 if (UserParent && UserParent != UserBB)
5023 return std::nullopt;
5024 UserParent = UserBB;
5025
5026 // Make sure these checks are done only once; naturally, we do them the
5027 // first time we compute the user parent, which saves compile time.
5028 if (NumUsers == 0) {
5029 // Try sinking to another block. If that block is unreachable, then do
5030 // not bother. SimplifyCFG should handle it.
5031 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5032 return std::nullopt;
5033
5034 auto *Term = UserParent->getTerminator();
5035 // See if the user is one of our successors that has only one
5036 // predecessor, so that we don't have to split the critical edge.
5037 // Another option where we can sink is a block that ends with a
5038 // terminator that does not pass control to another block (such as
5039 // return or unreachable or resume). In this case:
5040 // - I dominates the User (by SSA form);
5041 // - the User will be executed at most once.
5042 // So sinking I down to User is always profitable or neutral.
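            // Illustrative example: sinking into a return block is fine even
            // if it has multiple predecessors, because 'ret' has no
            // successors and executes at most once on any path:
            //   bb:   %v = mul i32 %a, %b
            //         br i1 %c, label %exit, label %other
            //   exit: ret i32 %v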
5043 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5044 return std::nullopt;
5045
5046 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5047 }
5048
5049 NumUsers++;
5050 }
5051
5052 // No user or only has droppable users.
5053 if (!UserParent)
5054 return std::nullopt;
5055
5056 return UserParent;
5057 };
5058
5059 auto OptBB = getOptionalSinkBlockForInst(I);
5060 if (OptBB) {
5061 auto *UserParent = *OptBB;
5062 // Okay, the CFG is simple enough, try to sink this instruction.
5063 if (tryToSinkInstruction(I, UserParent)) {
5064 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5065 MadeIRChange = true;
5066 // We'll add uses of the sunk instruction below, but since
5067 // sinking can expose opportunities for its *operands*, add
5068 // them to the worklist.
5069 for (Use &U : I->operands())
5070 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5071 Worklist.push(OpI);
5072 }
5073 }
5074
5075 // Now that we have an instruction, try combining it to simplify it.
5076 Builder.SetInsertPoint(I);
5077 Builder.CollectMetadataToCopy(
5078 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5079
5080#ifndef NDEBUG
5081 std::string OrigI;
5082#endif
5083 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
5084 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5085
5086 if (Instruction *Result = visit(*I)) {
5087 ++NumCombined;
5088 // Should we replace the old instruction with a new one?
5089 if (Result != I) {
5090 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5091 << " New = " << *Result << '\n');
5092
5093 Result->copyMetadata(*I,
5094 {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5095 // Everything uses the new instruction now.
5096 I->replaceAllUsesWith(Result);
5097
5098 // Move the name to the new instruction first.
5099 Result->takeName(I);
5100
5101 // Insert the new instruction into the basic block...
5102 BasicBlock *InstParent = I->getParent();
5103 BasicBlock::iterator InsertPos = I->getIterator();
5104
5105 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5106 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5107 // We need to fix up the insertion point.
5108 if (isa<PHINode>(I)) // PHI -> Non-PHI
5109 InsertPos = InstParent->getFirstInsertionPt();
5110 else // Non-PHI -> PHI
5111 InsertPos = InstParent->getFirstNonPHIIt();
5112 }
5113
5114 Result->insertInto(InstParent, InsertPos);
5115
5116 // Push the new instruction and any users onto the worklist.
5117 Worklist.pushUsersToWorkList(*Result);
5118 Worklist.push(Result);
5119
5120 eraseInstFromFunction(*I);
5121 } else {
5122 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5123 << " New = " << *I << '\n');
5124
5125 // If the instruction was modified, it's possible that it is now dead.
5126 // If so, remove it.
5127 if (isInstructionTriviallyDead(I, &TLI)) {
5128 eraseInstFromFunction(*I);
5129 } else {
5130 Worklist.pushUsersToWorkList(*I);
5131 Worklist.push(I);
5132 }
5133 }
5134 MadeIRChange = true;
5135 }
5136 }
5137
5138 Worklist.zap();
5139 return MadeIRChange;
5140}
5141
5142// Track the scopes used by !alias.scope and !noalias. In a function, a
5143// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5144// by both an !alias.scope and a !noalias annotation. If not, the declaration
5145// of the scope can be safely omitted. The MDNode of the scope can be omitted
5146// as well for the instructions that are part of this function. We do not do
5147// that at this point, as it might become too time-consuming.
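// Illustrative example: the scope list !2 = !{!3} below is used only via
// !alias.scope, never via !noalias, so the declaration constrains nothing
// and isNoAliasScopeDeclDead() reports it as removable:
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
//   %v = load i32, ptr %p, !alias.scope !2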
5148class AliasScopeTracker {
5149 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5150 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5151
5152public:
5153 void analyse(Instruction *I) {
5154 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5155 if (!I->hasMetadataOtherThanDebugLoc())
5156 return;
5157
5158 auto Track = [](Metadata *ScopeList, auto &Container) {
5159 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5160 if (!MDScopeList || !Container.insert(MDScopeList).second)
5161 return;
5162 for (const auto &MDOperand : MDScopeList->operands())
5163 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5164 Container.insert(MDScope);
5165 };
5166
5167 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5168 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5169 }
5170
5171 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5172 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5173 if (!Decl)
5174 return false;
5175
5176 assert(Decl->use_empty() &&
5177 "llvm.experimental.noalias.scope.decl in use ?");
5178 const MDNode *MDSL = Decl->getScopeList();
5179 assert(MDSL->getNumOperands() == 1 &&
5180 "llvm.experimental.noalias.scope should refer to a single scope");
5181 auto &MDOperand = MDSL->getOperand(0);
5182 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5183 return !UsedAliasScopesAndLists.contains(MD) ||
5184 !UsedNoAliasScopesAndLists.contains(MD);
5185
5186 // Not an MDNode? Throw it away.
5187 return true;
5188 }
5189};
5190
5191/// Populate the IC worklist from a function, by walking it in reverse
5192/// post-order and adding all reachable code to the worklist.
5193///
5194/// This has a couple of tricks to make the code faster and more powerful. In
5195/// particular, we constant fold and DCE instructions as we go, to avoid adding
5196/// them to the worklist (this significantly speeds up instcombine on code where
5197/// many instructions are dead or constant). Additionally, if we find a branch
5198/// whose condition is a known constant, we only visit the reachable successors.
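/// For example (illustrative), given `br i1 true, label %live, label %dead`,
/// only %live is walked; the edge to %dead is recorded as dead, and %dead's
/// phi inputs coming from this block are rewritten to poison.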
5199bool InstCombinerImpl::prepareWorklist(
5200 Function &F, ReversePostOrderTraversal<BasicBlock *> &RPOT) {
5201 bool MadeIRChange = false;
5202 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5203 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5204 DenseMap<Constant *, Constant *> FoldedConstants;
5205 AliasScopeTracker SeenAliasScopes;
5206
5207 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5208 for (BasicBlock *Succ : successors(BB))
5209 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5210 for (PHINode &PN : Succ->phis())
5211 for (Use &U : PN.incoming_values())
5212 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5213 U.set(PoisonValue::get(PN.getType()));
5214 MadeIRChange = true;
5215 }
5216 };
5217
5218 for (BasicBlock *BB : RPOT) {
5219 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5220 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5221 })) {
5222 HandleOnlyLiveSuccessor(BB, nullptr);
5223 continue;
5224 }
5225 LiveBlocks.insert(BB);
5226
5227 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5228 // Constant-fold the instruction if it is trivially constant.
5229 if (!Inst.use_empty() &&
5230 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5231 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5232 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5233 << '\n');
5234 Inst.replaceAllUsesWith(C);
5235 ++NumConstProp;
5236 if (isInstructionTriviallyDead(&Inst, &TLI))
5237 Inst.eraseFromParent();
5238 MadeIRChange = true;
5239 continue;
5240 }
5241
5242 // See if we can constant fold its operands.
5243 for (Use &U : Inst.operands()) {
5244 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5245 continue;
5246
5247 auto *C = cast<Constant>(U);
5248 Constant *&FoldRes = FoldedConstants[C];
5249 if (!FoldRes)
5250 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5251
5252 if (FoldRes != C) {
5253 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5254 << "\n Old = " << *C
5255 << "\n New = " << *FoldRes << '\n');
5256 U = FoldRes;
5257 MadeIRChange = true;
5258 }
5259 }
5260
5261 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5262 // these call instructions consumes a non-trivial amount of time and
5263 // provides no value for the optimization.
5264 if (!Inst.isDebugOrPseudoInst()) {
5265 InstrsForInstructionWorklist.push_back(&Inst);
5266 SeenAliasScopes.analyse(&Inst);
5267 }
5268 }
5269
5270 // If this is a branch or switch on a constant, mark only the single
5271 // live successor. Otherwise assume all successors are live.
5272 Instruction *TI = BB->getTerminator();
5273 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5274 if (isa<UndefValue>(BI->getCondition())) {
5275 // Branch on undef is UB.
5276 HandleOnlyLiveSuccessor(BB, nullptr);
5277 continue;
5278 }
5279 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5280 bool CondVal = Cond->getZExtValue();
5281 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5282 continue;
5283 }
5284 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5285 if (isa<UndefValue>(SI->getCondition())) {
5286 // Switch on undef is UB.
5287 HandleOnlyLiveSuccessor(BB, nullptr);
5288 continue;
5289 }
5290 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5291 HandleOnlyLiveSuccessor(BB,
5292 SI->findCaseValue(Cond)->getCaseSuccessor());
5293 continue;
5294 }
5295 }
5296 }
5297
5298 // Remove instructions inside unreachable blocks. This prevents the
5299 // instcombine code from having to deal with some bad special cases, and
5300 // reduces use counts of instructions.
5301 for (BasicBlock &BB : F) {
5302 if (LiveBlocks.count(&BB))
5303 continue;
5304
5305 unsigned NumDeadInstInBB;
5306 unsigned NumDeadDbgInstInBB;
5307 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5308 removeAllNonTerminatorAndEHPadInstructions(&BB);
5309
5310 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5311 NumDeadInst += NumDeadInstInBB;
5312 }
5313
5314 // Once we've found all of the instructions to add to instcombine's worklist,
5315 // add them in reverse order. This way instcombine will visit from the top
5316 // of the function down. This jibes well with the way that it adds all uses
5317 // of instructions to the worklist after doing a transformation, thus avoiding
5318 // some N^2 behavior in pathological cases.
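// Illustrative example: for instructions I1; I2; I3 in program order, pushing
// I3, then I2, then I1 makes the LIFO worklist pop (and visit) I1 first,
// i.e. top-down through the function.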
5319 Worklist.reserve(InstrsForInstructionWorklist.size());
5320 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5321 // DCE instruction if trivially dead. As we iterate in reverse program
5322 // order here, we will clean up whole chains of dead instructions.
5323 if (isInstructionTriviallyDead(Inst, &TLI) ||
5324 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5325 ++NumDeadInst;
5326 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5327 salvageDebugInfo(*Inst);
5328 Inst->eraseFromParent();
5329 MadeIRChange = true;
5330 continue;
5331 }
5332
5333 Worklist.push(Inst);
5334 }
5335
5336 return MadeIRChange;
5337}
5338
5339static bool combineInstructionsOverFunction(
5340 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5341 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5342 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5343 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, LoopInfo *LI,
5344 const InstCombineOptions &Opts) {
5345 auto &DL = F.getDataLayout();
5346
5347 /// Builder - This is an IRBuilder that automatically adds new
5348 /// instructions to the worklist when they are created.
5349 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
5350 F.getContext(), TargetFolder(DL),
5351 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5352 Worklist.add(I);
5353 if (auto *Assume = dyn_cast<AssumeInst>(I))
5354 AC.registerAssumption(Assume);
5355 }));
5356
5357 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5358
5359 // Lower dbg.declare intrinsics; otherwise their value may be clobbered
5360 // by the instcombiner.
5361 bool MadeIRChange = false;
5362 if (ShouldLowerDbgDeclare)
5363 MadeIRChange = LowerDbgDeclare(F);
5364
5365 // Iterate while there is work to do.
5366 unsigned Iteration = 0;
5367 while (true) {
5368 ++Iteration;
5369
5370 if (Iteration > Opts.MaxIterations && !Opts.VerifyFixpoint) {
5371 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5372 << " on " << F.getName()
5373 << " reached; stopping without verifying fixpoint\n");
5374 break;
5375 }
5376
5377 ++NumWorklistIterations;
5378 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5379 << F.getName() << "\n");
5380
5381 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5382 ORE, BFI, BPI, PSI, DL, LI);
5383 IC.MaxArraySizeForCombine = MaxArraySize;
5384 bool MadeChangeInThisIteration = IC.prepareWorklist(F, RPOT);
5385 MadeChangeInThisIteration |= IC.run();
5386 if (!MadeChangeInThisIteration)
5387 break;
5388
5389 MadeIRChange = true;
5390 if (Iteration > Opts.MaxIterations) {
5391 report_fatal_error(
5392 "Instruction Combining did not reach a fixpoint after " +
5393 Twine(Opts.MaxIterations) + " iterations",
5394 /*GenCrashDiag=*/false);
5395 }
5396 }
5397
5398 if (Iteration == 1)
5399 ++NumOneIteration;
5400 else if (Iteration == 2)
5401 ++NumTwoIterations;
5402 else if (Iteration == 3)
5403 ++NumThreeIterations;
5404 else
5405 ++NumFourOrMoreIterations;
5406
5407 return MadeIRChange;
5408}
5409
5410InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
5411
5412void InstCombinePass::printPipeline(
5413 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5414 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5415 OS, MapClassName2PassName);
5416 OS << '<';
5417 OS << "max-iterations=" << Options.MaxIterations << ";";
5418 OS << (Options.UseLoopInfo ? "" : "no-") << "use-loop-info;";
5419 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5420 OS << '>';
5421}
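// For example (illustrative), with MaxIterations=2, UseLoopInfo=true and
// VerifyFixpoint=false, printPipeline above emits:
//   instcombine<max-iterations=2;use-loop-info;no-verify-fixpoint>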
5422
5423PreservedAnalyses InstCombinePass::run(Function &F, FunctionAnalysisManager &AM) {
5425 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5426 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5427 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5428 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5429 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5430
5431 // TODO: Only use LoopInfo when the option is set. This requires that the
5432 // callers in the pass pipeline explicitly set the option.
5433 auto *LI = AM.getCachedResult<LoopAnalysis>(F);
5434 if (!LI && Options.UseLoopInfo)
5435 LI = &AM.getResult<LoopAnalysis>(F);
5436
5437 auto *AA = &AM.getResult<AAManager>(F);
5438 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5439 ProfileSummaryInfo *PSI =
5440 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5441 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5442 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5443 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
5444
5445 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5446 BFI, BPI, PSI, LI, Options))
5447 // No changes, all analyses are preserved.
5448 return PreservedAnalyses::all();
5449
5450 // Mark all the analyses that instcombine updates as preserved.
5451 PreservedAnalyses PA;
5452 PA.preserveSet<CFGAnalyses>();
5453 return PA;
5454}
5455
5456void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
5457 AU.setPreservesCFG();
5458 AU.addRequired<AAResultsWrapperPass>();
5459 AU.addRequired<AssumptionCacheTracker>();
5460 AU.addRequired<TargetLibraryInfoWrapperPass>();
5461 AU.addRequired<TargetTransformInfoWrapperPass>();
5462 AU.addRequired<DominatorTreeWrapperPass>();
5463 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
5464 AU.addPreserved<DominatorTreeWrapperPass>();
5465 AU.addPreserved<AAResultsWrapperPass>();
5466 AU.addPreserved<BasicAAWrapperPass>();
5467 AU.addPreserved<GlobalsAAWrapperPass>();
5468 AU.addRequired<ProfileSummaryInfoWrapperPass>();
5469 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
5470}
5471
5472bool InstructionCombiningPass::runOnFunction(Function &F) {
5473 if (skipFunction(F))
5474 return false;
5475
5476 // Required analyses.
5477 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5478 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5479 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
5480 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5481 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5482 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5483
5484 // Optional analyses.
5485 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
5486 auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
5487 ProfileSummaryInfo *PSI =
5488 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5489 BlockFrequencyInfo *BFI =
5490 (PSI && PSI->hasProfileSummary()) ?
5491 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5492 nullptr;
5493 BranchProbabilityInfo *BPI = nullptr;
5494 if (auto *WrapperPass =
5495 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5496 BPI = &WrapperPass->getBPI();
5497
5498 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5499 BFI, BPI, PSI, LI,
5500 InstCombineOptions());
5501}
5502
5503char InstructionCombiningPass::ID = 0;
5504
5505InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
5506 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
5507}
5508
5509INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
5510 "Combine redundant instructions", false, false)
5511INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
5512INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
5513INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
5514INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
5515INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
5516INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
5517INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
5518INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
5519INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
5520INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
5521 "Combine redundant instructions", false, false)
5522
5523// Initialization Routines
5524void llvm::initializeInstCombine(PassRegistry &Registry) {
5525 initializeInstructionCombiningPassPass(Registry);
5526}
5527
5528FunctionPass *llvm::createInstructionCombiningPass() {
5529 return new InstructionCombiningPass();
5530}
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
Definition: InstCombiner.h:47
SimplifyQuery SQ
Definition: InstCombiner.h:76
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:341
static bool isCanonicalPredicate(CmpInst::Predicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
Definition: InstCombiner.h:157
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:232
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Definition: InstCombiner.h:139
TargetLibraryInfo & TLI
Definition: InstCombiner.h:73
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:366
AAResults * AA
Definition: InstCombiner.h:69
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:386
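A schematic sketch of the visitor idiom these helpers support (visitExample is a hypothetical method, not one defined in this file; the real visit methods are listed above):

  Instruction *visitExample(BinaryOperator &I) {
    // Ask the simplifier first; on success, RAUW and queue affected users.
    if (Value *V = simplifyBinOp(I.getOpcode(), I.getOperand(0),
                                 I.getOperand(1), SQ.getWithInstruction(&I)))
      return replaceInstUsesWith(I, V);
    return nullptr; // nullptr tells the driver nothing changed
  }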
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
Definition: InstCombiner.h:55
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
Definition: InstCombiner.h:191
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
Definition: InstCombiner.h:418
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Definition: InstCombiner.h:64
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombiner.h:375
BranchProbabilityInfo * BPI
Definition: InstCombiner.h:79
const DataLayout & DL
Definition: InstCombiner.h:75
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:452
DomConditionCache DC
Definition: InstCombiner.h:81
const bool MinimizeSize
Definition: InstCombiner.h:67
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:336
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:410
DominatorTree & DT
Definition: InstCombiner.h:74
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
Definition: InstCombiner.h:284
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
Definition: InstCombiner.h:90
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
Definition: InstCombiner.h:431
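Sketch of a typical known-bits query, assuming V is a Value* and I the instruction providing context (both hypothetical):

  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, /*Depth=*/0, &I);
  // If the upper half of V is known zero, a narrower operation may suffice.
  if (Known.countMinLeadingZeros() >= Known.getBitWidth() / 2) {
    // ...
  }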
BuilderTy & Builder
Definition: InstCombiner.h:60
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:213
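The query pair isFreeToInvert/getFreelyInverted is normally used back to back; a sketch, with Op a hypothetical Value* operand:

  bool DoesConsume = false;
  if (isFreeToInvert(Op, /*WillInvertAllUses=*/Op->hasOneUse(), DoesConsume))
    if (llvm::Value *NotOp =
            getFreelyInverted(Op, Op->hasOneUse(), &Builder, DoesConsume)) {
      // Use NotOp instead of materializing an explicit 'xor Op, -1'.
    }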
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition: InstCombine.h:71
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void zap()
Check that the worklist is empty and nuke the backing store for the map.
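A schematic of the worklist discipline built from these operations (simplified relative to the real driver; combine() is a hypothetical per-instruction step):

  while (!Worklist.isEmpty()) {
    llvm::Instruction *I = Worklist.removeOne();
    if (!I)
      continue; // slots are nulled out when instructions are removed
    if (llvm::Instruction *Result = combine(I)) {
      Worklist.pushUsersToWorkList(*Result);
      Worklist.push(Result);
    }
  }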
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:323
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:476
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:66
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
bool isTerminator() const
Definition: Instruction.h:277
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
Definition: Instruction.h:328
bool isShift() const
Definition: Instruction.h:281
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:473
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isIntDivRem() const
Definition: Instruction.h:280
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Definition: Instructions.h:173
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:571
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition: Metadata.h:62
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR uni...
Definition: PassManager.h:688
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
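Construction sketch, assuming hypothetical values V1/V2 arriving from blocks BB1/BB2 and an insertion point InsertPt at the head of the merge block:

  llvm::PHINode *PN =
      llvm::PHINode::Create(V1->getType(), /*NumReservedValues=*/2, "merge",
                            InsertPt);
  PN->addIncoming(V1, BB1);
  PN->addIncoming(V2, BB2);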
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition: Constants.h:1433
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1814
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition: SmallPtrSet.h:94
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:412
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:418
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
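For both small-set classes, the insert() return value doubles as a membership test; a common idiom (Blocks is a hypothetical range of BasicBlock*):

  llvm::SmallPtrSet<llvm::BasicBlock *, 8> Visited;
  for (llvm::BasicBlock *BB : Blocks)
    if (!Visited.insert(BB).second)
      continue; // .second is false when BB was already present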
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void reserve(size_type N)
Definition: SmallVector.h:676
iterator erase(const_iterator CI)
Definition: SmallVector.h:750
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
typename SuperClass::iterator iterator
Definition: SmallVector.h:590
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:289
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Definition: TargetFolder.h:34
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
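Sketch of recognizing a known library call with these two queries (Callee is a hypothetical Function*):

  llvm::LibFunc TheLibFunc;
  if (TLI.getLibFunc(Callee->getName(), TheLibFunc) && TLI.has(TheLibFunc)) {
    // TheLibFunc now names a recognized, available library function.
  }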
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:249
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:242
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
Value * getOperand(unsigned i) const
Definition: User.h:169
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:693
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:851
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
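Type-construction sketch using the two factory methods above (Ctx is a hypothetical LLVMContext&):

  llvm::IntegerType *I8 = llvm::IntegerType::get(Ctx, 8);
  // A fixed-width <16 x i8> vector type.
  auto *V16I8 = llvm::VectorType::get(I8, llvm::ElementCount::getFixed(16));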
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:199
constexpr bool isZero() const
Definition: TypeSize.h:156
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:135
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1484
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:972
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:816
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:875
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
Definition: PatternMatch.h:186
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr or ashr).
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:854
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
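A minimal PatternMatch sketch combining several of the matchers above (V is a hypothetical Value*):

  using namespace llvm::PatternMatch;
  llvm::Value *X;
  const llvm::APInt *C;
  // Binds X and C when V is a single-use 'add X, C' with C a constant
  // integer or splat vector.
  if (match(V, m_OneUse(m_Add(m_Value(X), m_APInt(C))))) {
    // ...
  }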
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
Exact_match< T > m_Exact(const T &SubPattern)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
Definition: PatternMatch.h:791
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
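The commutative matchers pair naturally with m_Not; a sketch (V again hypothetical):

  llvm::Value *A, *B;
  // Matches both '(~A) op B' and 'B op (~A)' for any binary operator.
  if (match(V, m_c_BinOp(m_Not(m_Value(A)), m_Value(B)))) {
    // ...
  }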
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ FalseVal
Definition: TGLexer.h:59
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition: DWP.cpp:480
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
Definition: STLExtras.h:853
void stable_sort(R &&Range)
Definition: STLExtras.h:1995
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2807
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
Definition: STLExtras.h:2400
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2246
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:145
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1665
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2067
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:656
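make_early_inc_range is the standard way to erase while iterating; a sketch sweeping trivially dead instructions from a block (BB and TLI are hypothetical):

  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (llvm::isInstructionTriviallyDead(&I, TLI))
      I.eraseFromParent(); // safe: the iterator advanced before the body ran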
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2789
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant, stopping at the first 1.
Definition: bit.h:215
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
Definition: bit.h:146
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:400
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition: Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:48
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1647
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into an appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1918
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===----------------------------------------------------------------------===// Dbg Intrinsic utilities
Definition: Local.cpp:1693
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
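A sketch of constant folding a binary operator before trying anything cleverer (I is a hypothetical BinaryOperator&, DL the module's DataLayout):

  auto *CL = llvm::dyn_cast<llvm::Constant>(I.getOperand(0));
  auto *CR = llvm::dyn_cast<llvm::Constant>(I.getOperand(1));
  if (CL && CR)
    if (llvm::Constant *Folded =
            llvm::ConstantFoldBinaryOpOperands(I.getOpcode(), CL, CR, DL)) {
      // Replace I with Folded, e.g. via replaceInstUsesWith(I, Folded).
    }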
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2717
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:336
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition: STLExtras.h:2025
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:281
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:317
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:240
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:237
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SimplifyQuery getWithInstruction(const Instruction *I) const
Definition: SimplifyQuery.h:96
SimplifyQuery getWithoutUndef() const
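Closing sketch: a SimplifyQuery threads context (data layout, dominator tree, context instruction) into the simplifiers; assuming I is a hypothetical 'add' BinaryOperator:

  const llvm::SimplifyQuery Q = SQ.getWithInstruction(&I);
  if (llvm::Value *V = llvm::simplifyAddInst(I.getOperand(0), I.getOperand(1),
                                             I.hasNoSignedWrap(),
                                             I.hasNoUnsignedWrap(), Q)) {
    // I computes the same value as V and can be folded away.
  }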