1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APInt.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
41#include "llvm/ADT/Statistic.h"
46#include "llvm/Analysis/CFG.h"
61#include "llvm/IR/BasicBlock.h"
62#include "llvm/IR/CFG.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DIBuilder.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
69#include "llvm/IR/Dominators.h"
71#include "llvm/IR/Function.h"
73#include "llvm/IR/IRBuilder.h"
74#include "llvm/IR/InstrTypes.h"
75#include "llvm/IR/Instruction.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/Metadata.h"
80#include "llvm/IR/Operator.h"
81#include "llvm/IR/PassManager.h"
83#include "llvm/IR/Type.h"
84#include "llvm/IR/Use.h"
85#include "llvm/IR/User.h"
86#include "llvm/IR/Value.h"
87#include "llvm/IR/ValueHandle.h"
92#include "llvm/Support/Debug.h"
100#include <algorithm>
101#include <cassert>
102#include <cstdint>
103#include <memory>
104#include <optional>
105#include <string>
106#include <utility>
107
108#define DEBUG_TYPE "instcombine"
110#include <optional>
111
112using namespace llvm;
113using namespace llvm::PatternMatch;
114
115STATISTIC(NumWorklistIterations,
116 "Number of instruction combining iterations performed");
117STATISTIC(NumOneIteration, "Number of functions with one iteration");
118STATISTIC(NumTwoIterations, "Number of functions with two iterations");
119STATISTIC(NumThreeIterations, "Number of functions with three iterations");
120STATISTIC(NumFourOrMoreIterations,
121 "Number of functions with four or more iterations");
122
123STATISTIC(NumCombined , "Number of insts combined");
124STATISTIC(NumConstProp, "Number of constant folds");
125STATISTIC(NumDeadInst , "Number of dead inst eliminated");
126STATISTIC(NumSunkInst , "Number of instructions sunk");
127STATISTIC(NumExpand, "Number of expansions");
128STATISTIC(NumFactor , "Number of factorizations");
129STATISTIC(NumReassoc , "Number of reassociations");
130DEBUG_COUNTER(VisitCounter, "instcombine-visit",
131 "Controls which instructions are visited");
132
133static cl::opt<bool>
134EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
135 cl::init(true));
136
static cl::opt<unsigned>
MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32),
                cl::desc("Maximum number of undroppable users for instruction sinking"));
140
static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));
144
145// FIXME: Remove this flag when it is no longer necessary to convert
146// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
147// increases variable availability at the cost of accuracy. Variables that
148// cannot be promoted by mem2reg or SROA will be described as living in memory
149// for their entire lifetime. However, passes like DSE and instcombine can
150// delete stores to the alloca, leading to misleading and inaccurate debug
151// information. This flag can be removed when those passes are fixed.
152static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
153 cl::Hidden, cl::init(true));
154
std::optional<Instruction *>
InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
  }
  return std::nullopt;
}
163
std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
        *this, II, DemandedMask, Known, KnownBitsComputed);
  }
  return std::nullopt;
}
174
std::optional<Value *>
InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {
  // Handle target specific intrinsics
  if (II.getCalledFunction()->isTargetIntrinsic()) {
    return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
        *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
        SimplifyAndSetOp);
  }
  return std::nullopt;
}
188
189bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
  // Approved exception for TTI use: This queries a legality property of the
  // target, not a profitability heuristic. Ideally this should be part of
  // DataLayout instead.
193 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
194}
195
Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
  if (!RewriteGEP)
    return llvm::emitGEPOffset(&Builder, DL, GEP);

  IRBuilderBase::InsertPointGuard Guard(Builder);
  auto *Inst = dyn_cast<Instruction>(GEP);
  if (Inst)
    Builder.SetInsertPoint(Inst);

  Value *Offset = EmitGEPOffset(GEP);
  // If a non-trivial GEP has other uses, rewrite it to avoid duplicating
  // the offset arithmetic.
  if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
      !GEP->getSourceElementType()->isIntegerTy(8)) {
    replaceInstUsesWith(
        *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
                                 Offset, "", GEP->getNoWrapFlags()));
    eraseInstFromFunction(*Inst);
  }
  return Offset;
}
217
/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
/// types in frontend languages.
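/// For example, with a DataLayout that only declares i64 as a legal integer
/// type, i16 and i32 still count as desirable here, while a width such as i24
/// does not.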
223bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
224 switch (BitWidth) {
225 case 8:
226 case 16:
227 case 32:
228 return true;
229 default:
230 return DL.isLegalInteger(BitWidth);
231 }
232}
233
234/// Return true if it is desirable to convert an integer computation from a
235/// given bit width to a new bit width.
236/// We don't want to convert from a legal or desirable type (like i8) to an
237/// illegal type or from a smaller to a larger illegal type. A width of '1'
238/// is always treated as a desirable type because i1 is a fundamental type in
239/// IR, and there are many specialized optimizations for i1 types.
240/// Common/desirable widths are equally treated as legal to convert to, in
241/// order to open up more combining opportunities.
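/// For example, with only i64 legal: i64 -> i32 is allowed (shrinking to a
/// desirable width), i160 -> i64 is allowed, but i64 -> i160 is not.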
242bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
243 unsigned ToWidth) const {
244 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
245 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
246
247 // Convert to desirable widths even if they are not legal types.
248 // Only shrink types, to prevent infinite loops.
249 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
250 return true;
251
  // If this is a legal or desirable integer source type, and the result would
  // be an illegal type, don't do the transformation.
254 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
255 return false;
256
257 // Otherwise, if both are illegal, do not increase the size of the result. We
258 // do allow things like i160 -> i64, but not i64 -> i160.
259 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
260 return false;
261
262 return true;
263}
264
265/// Return true if it is desirable to convert a computation from 'From' to 'To'.
266/// We don't want to convert from a legal to an illegal type or from a smaller
267/// to a larger illegal type. i1 is always treated as a legal type because it is
268/// a fundamental type in IR, and there are many specialized optimizations for
269/// i1 types.
270bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
271 // TODO: This could be extended to allow vectors. Datalayout changes might be
272 // needed to properly support that.
273 if (!From->isIntegerTy() || !To->isIntegerTy())
274 return false;
275
276 unsigned FromWidth = From->getPrimitiveSizeInBits();
277 unsigned ToWidth = To->getPrimitiveSizeInBits();
278 return shouldChangeType(FromWidth, ToWidth);
279}
280
281// Return true, if No Signed Wrap should be maintained for I.
282// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
283// where both B and C should be ConstantInts, results in a constant that does
284// not overflow. This function only handles the Add and Sub opcodes. For
285// all other opcodes, the function conservatively returns false.
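// For example, when "(add nsw X, 5)" is reassociated with another constant 7,
// nsw can be kept only because "5 + 7" does not signed-overflow in the operand
// type; with constants that do overflow, the flag must be dropped.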
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
288 if (!OBO || !OBO->hasNoSignedWrap())
289 return false;
290
291 // We reason about Add and Sub Only.
292 Instruction::BinaryOps Opcode = I.getOpcode();
293 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
294 return false;
295
296 const APInt *BVal, *CVal;
297 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
298 return false;
299
300 bool Overflow = false;
301 if (Opcode == Instruction::Add)
302 (void)BVal->sadd_ov(*CVal, Overflow);
303 else
304 (void)BVal->ssub_ov(*CVal, Overflow);
305
306 return !Overflow;
307}
308
static bool hasNoUnsignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
311 return OBO && OBO->hasNoUnsignedWrap();
312}
313
static bool hasNoSignedWrap(BinaryOperator &I) {
  auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
316 return OBO && OBO->hasNoSignedWrap();
317}
318
/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. Fast-math flags are preserved when applicable.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
  FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
324 if (!FPMO) {
325 I.clearSubclassOptionalData();
326 return;
327 }
328
329 FastMathFlags FMF = I.getFastMathFlags();
330 I.clearSubclassOptionalData();
331 I.setFastMathFlags(FMF);
332}
333
/// Combine constant operands of associative operations either before or after
/// a cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {
340 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
341 if (!Cast || !Cast->hasOneUse())
342 return false;
343
344 // TODO: Enhance logic for other casts and remove this check.
345 auto CastOpcode = Cast->getOpcode();
346 if (CastOpcode != Instruction::ZExt)
347 return false;
348
349 // TODO: Enhance logic for other BinOps and remove this check.
350 if (!BinOp1->isBitwiseLogicOp())
351 return false;
352
353 auto AssocOpcode = BinOp1->getOpcode();
354 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
355 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
356 return false;
357
358 Constant *C1, *C2;
359 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
360 !match(BinOp2->getOperand(1), m_Constant(C2)))
361 return false;
362
363 // TODO: This assumes a zext cast.
364 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
365 // to the destination type might lose bits.
366
367 // Fold the constants together in the destination type:
368 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
369 const DataLayout &DL = IC.getDataLayout();
370 Type *DestTy = C1->getType();
371 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
372 if (!CastC2)
373 return false;
374 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
375 if (!FoldedC)
376 return false;
377
378 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
379 IC.replaceOperand(*BinOp1, 1, FoldedC);
  BinOp1->dropPoisonGeneratingFlags();
  Cast->dropPoisonGeneratingFlags();
382 return true;
383}
384
385// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
386// inttoptr ( ptrtoint (x) ) --> x
387Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
388 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
389 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
390 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
391 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
392 Type *CastTy = IntToPtr->getDestTy();
393 if (PtrToInt &&
394 CastTy->getPointerAddressSpace() ==
395 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
396 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
397 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
398 return PtrToInt->getOperand(0);
399 }
400 return nullptr;
401}
402
403/// This performs a few simplifications for operators that are associative or
404/// commutative:
405///
406/// Commutative operators:
407///
408/// 1. Order operands such that they are listed from right (least complex) to
409/// left (most complex). This puts constants before unary operators before
410/// binary operators.
411///
412/// Associative operators:
413///
414/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
415/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
416///
417/// Associative and commutative operators:
418///
419/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
420/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
421/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
422/// if C1 and C2 are constants.
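///
/// For example, rule 2 rewrites "(%x + 7) + 3" into "%x + 10", because the
/// inner "7 + 3" folds to a constant.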
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
425 bool Changed = false;
426
427 do {
428 // Order operands such that they are listed from right (least complex) to
429 // left (most complex). This puts constants before unary operators before
430 // binary operators.
431 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
432 getComplexity(I.getOperand(1)))
433 Changed = !I.swapOperands();
434
435 if (I.isCommutative()) {
436 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
437 replaceOperand(I, 0, Pair->first);
438 replaceOperand(I, 1, Pair->second);
439 Changed = true;
440 }
441 }
442
443 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
444 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
445
446 if (I.isAssociative()) {
447 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
448 if (Op0 && Op0->getOpcode() == Opcode) {
449 Value *A = Op0->getOperand(0);
450 Value *B = Op0->getOperand(1);
451 Value *C = I.getOperand(1);
452
453 // Does "B op C" simplify?
454 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
455 // It simplifies to V. Form "A op V".
456 replaceOperand(I, 0, A);
457 replaceOperand(I, 1, V);
458 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
459 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
460
461 // Conservatively clear all optional flags since they may not be
462 // preserved by the reassociation. Reset nsw/nuw based on the above
463 // analysis.
          ClearSubclassDataAfterReassociation(I);

466 // Note: this is only valid because SimplifyBinOp doesn't look at
467 // the operands to Op0.
468 if (IsNUW)
469 I.setHasNoUnsignedWrap(true);
470
471 if (IsNSW)
472 I.setHasNoSignedWrap(true);
473
474 Changed = true;
475 ++NumReassoc;
476 continue;
477 }
478 }
479
480 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
481 if (Op1 && Op1->getOpcode() == Opcode) {
482 Value *A = I.getOperand(0);
483 Value *B = Op1->getOperand(0);
484 Value *C = Op1->getOperand(1);
485
486 // Does "A op B" simplify?
487 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
488 // It simplifies to V. Form "V op C".
489 replaceOperand(I, 0, V);
490 replaceOperand(I, 1, C);
491 // Conservatively clear the optional flags, since they may not be
492 // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
495 ++NumReassoc;
496 continue;
497 }
498 }
499 }
500
501 if (I.isAssociative() && I.isCommutative()) {
502 if (simplifyAssocCastAssoc(&I, *this)) {
503 Changed = true;
504 ++NumReassoc;
505 continue;
506 }
507
508 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
509 if (Op0 && Op0->getOpcode() == Opcode) {
510 Value *A = Op0->getOperand(0);
511 Value *B = Op0->getOperand(1);
512 Value *C = I.getOperand(1);
513
514 // Does "C op A" simplify?
515 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
516 // It simplifies to V. Form "V op B".
517 replaceOperand(I, 0, V);
518 replaceOperand(I, 1, B);
519 // Conservatively clear the optional flags, since they may not be
520 // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
523 ++NumReassoc;
524 continue;
525 }
526 }
527
528 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
529 if (Op1 && Op1->getOpcode() == Opcode) {
530 Value *A = I.getOperand(0);
531 Value *B = Op1->getOperand(0);
532 Value *C = Op1->getOperand(1);
533
534 // Does "C op A" simplify?
535 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
536 // It simplifies to V. Form "B op V".
537 replaceOperand(I, 0, B);
538 replaceOperand(I, 1, V);
539 // Conservatively clear the optional flags, since they may not be
540 // preserved by the reassociation.
          ClearSubclassDataAfterReassociation(I);
          Changed = true;
543 ++NumReassoc;
544 continue;
545 }
546 }
547
548 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
549 // if C1 and C2 are constants.
550 Value *A, *B;
551 Constant *C1, *C2, *CRes;
552 if (Op0 && Op1 &&
553 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
554 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
555 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
556 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
557 bool IsNUW = hasNoUnsignedWrap(I) &&
558 hasNoUnsignedWrap(*Op0) &&
559 hasNoUnsignedWrap(*Op1);
560 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
561 BinaryOperator::CreateNUW(Opcode, A, B) :
562 BinaryOperator::Create(Opcode, A, B);
563
564 if (isa<FPMathOperator>(NewBO)) {
565 FastMathFlags Flags = I.getFastMathFlags() &
566 Op0->getFastMathFlags() &
567 Op1->getFastMathFlags();
568 NewBO->setFastMathFlags(Flags);
569 }
570 InsertNewInstWith(NewBO, I.getIterator());
571 NewBO->takeName(Op1);
572 replaceOperand(I, 0, NewBO);
573 replaceOperand(I, 1, CRes);
574 // Conservatively clear the optional flags, since they may not be
575 // preserved by the reassociation.
        ClearSubclassDataAfterReassociation(I);
        if (IsNUW)
578 I.setHasNoUnsignedWrap(true);
579
580 Changed = true;
581 continue;
582 }
583 }
584
585 // No further simplifications.
586 return Changed;
587 } while (true);
588}
589
590/// Return whether "X LOp (Y ROp Z)" is always equal to
591/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  // X & (Y | Z) <--> (X & Y) | (X & Z)
595 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
596 if (LOp == Instruction::And)
597 return ROp == Instruction::Or || ROp == Instruction::Xor;
598
599 // X | (Y & Z) <--> (X | Y) & (X | Z)
600 if (LOp == Instruction::Or)
601 return ROp == Instruction::And;
602
603 // X * (Y + Z) <--> (X * Y) + (X * Z)
604 // X * (Y - Z) <--> (X * Y) - (X * Z)
605 if (LOp == Instruction::Mul)
606 return ROp == Instruction::Add || ROp == Instruction::Sub;
607
608 return false;
609}
610
/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return leftDistributesOverRight(ROp, LOp);

  // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
  return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);

  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
}
625
626/// This function returns identity value for given opcode, which can be used to
627/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
  if (isa<Constant>(V))
630 return nullptr;
631
632 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
633}
634
635/// This function predicates factorization using distributive laws. By default,
636/// it just returns the 'Op' inputs. But for special-cases like
637/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
638/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
639/// allow more factorization opportunities.
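/// For example, (add (shl %x, 5), %x) is factored as if it were
/// (add (mul %x, 32), %x), which can then fold to (mul %x, 33).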
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
643 assert(Op && "Expected a binary operator");
644 LHS = Op->getOperand(0);
645 RHS = Op->getOperand(1);
646 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
647 Constant *C;
648 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
649 // X << C --> X * (1 << C)
      RHS = ConstantFoldBinaryInstruction(
          Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
652 assert(RHS && "Constant folding of immediate constants failed");
653 return Instruction::Mul;
654 }
655 // TODO: We can add other conversions e.g. shr => div etc.
656 }
657 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
658 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
        match(Op, m_LShr(m_NonNegative(), m_Value()))) {
      // lshr nneg C, X --> ashr nneg C, X
661 return Instruction::AShr;
662 }
663 }
664 return Op->getOpcode();
665}
666
/// This tries to simplify binary operations by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, SimplifyQuery SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {
673 assert(A && B && C && D && "All values must be provided");
674
675 Value *V = nullptr;
676 Value *RetVal = nullptr;
677 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
678 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
679
680 // Does "X op' Y" always equal "Y op' X"?
681 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
682
683 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
684 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
685 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
686 // commutative case, "(A op' B) op (C op' A)"?
687 if (A == C || (InnerCommutative && A == D)) {
688 if (A != C)
689 std::swap(C, D);
690 // Consider forming "A op' (B op D)".
691 // If "B op D" simplifies then it can be formed with no cost.
692 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
693
694 // If "B op D" doesn't simplify then only go on if one of the existing
695 // operations "A op' B" and "C op' D" will be zapped as no longer used.
696 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
697 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
698 if (V)
699 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
700 }
701 }
702
703 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
704 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
705 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
706 // commutative case, "(A op' B) op (B op' D)"?
707 if (B == D || (InnerCommutative && B == C)) {
708 if (B != D)
709 std::swap(C, D);
710 // Consider forming "(A op C) op' B".
711 // If "A op C" simplifies then it can be formed with no cost.
712 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
713
714 // If "A op C" doesn't simplify then only go on if one of the existing
715 // operations "A op' B" and "C op' D" will be zapped as no longer used.
716 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
717 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
718 if (V)
719 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
720 }
721 }
722
723 if (!RetVal)
724 return nullptr;
725
726 ++NumFactor;
727 RetVal->takeName(&I);
728
729 // Try to add no-overflow flags to the final value.
730 if (isa<OverflowingBinaryOperator>(RetVal)) {
731 bool HasNSW = false;
732 bool HasNUW = false;
733 if (isa<OverflowingBinaryOperator>(&I)) {
734 HasNSW = I.hasNoSignedWrap();
735 HasNUW = I.hasNoUnsignedWrap();
736 }
737 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
738 HasNSW &= LOBO->hasNoSignedWrap();
739 HasNUW &= LOBO->hasNoUnsignedWrap();
740 }
741
742 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
743 HasNSW &= ROBO->hasNoSignedWrap();
744 HasNUW &= ROBO->hasNoUnsignedWrap();
745 }
746
747 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
748 // We can propagate 'nsw' if we know that
749 // %Y = mul nsw i16 %X, C
750 // %Z = add nsw i16 %Y, %X
751 // =>
752 // %Z = mul nsw i16 %X, C+1
753 //
754 // iff C+1 isn't INT_MIN
755 const APInt *CInt;
756 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
757 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
758
759 // nuw can be propagated with any constant or nuw value.
760 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
761 }
762 }
763 return RetVal;
764}
765
// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
//    1) (add/sub/disjoint_or C, (ctpop (not x))
//        -> (add/sub/disjoint_or C', (ctpop x))
//    2) (cmp pred C, (ctpop (not x))
//        -> (cmp pred C', (ctpop x))
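// For example, on i8: (add 5, (ctpop (not %x))) becomes (sub 13, (ctpop %x)),
// using the identity ctpop(~x) == 8 - ctpop(x).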
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
  unsigned Opc = I->getOpcode();
776 unsigned ConstIdx = 1;
777 switch (Opc) {
778 default:
779 return nullptr;
780 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
781 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
782 // is constant.
783 case Instruction::Sub:
784 ConstIdx = 0;
785 break;
786 case Instruction::ICmp:
    // Signed predicates aren't correct in some edge cases like for i2 types.
    // Also, since (ctpop x) is known to be in [0, BitWidth(x)], almost all
    // signed comparisons against it are simplified to unsigned ones.
790 if (cast<ICmpInst>(I)->isSigned())
791 return nullptr;
792 break;
793 case Instruction::Or:
794 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
795 return nullptr;
796 [[fallthrough]];
797 case Instruction::Add:
798 break;
799 }
800
801 Value *Op;
802 // Find ctpop.
803 if (!match(I->getOperand(1 - ConstIdx),
804 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
805 return nullptr;
806
807 Constant *C;
808 // Check other operand is ImmConstant.
809 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
810 return nullptr;
811
812 Type *Ty = Op->getType();
813 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
814 // Need extra check for icmp. Note if this check is true, it generally means
815 // the icmp will simplify to true/false.
816 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
817 Constant *Cmp =
819 if (!Cmp || !Cmp->isZeroValue())
820 return nullptr;
821 }
822
823 // Check we can invert `(not x)` for free.
824 bool Consumes = false;
825 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
826 return nullptr;
827 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
828 assert(NotOp != nullptr &&
829 "Desync between isFreeToInvert and getFreelyInverted");
830
831 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
832
833 Value *R = nullptr;
834
835 // Do the transformation here to avoid potentially introducing an infinite
836 // loop.
837 switch (Opc) {
838 case Instruction::Sub:
839 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
840 break;
841 case Instruction::Or:
842 case Instruction::Add:
843 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
844 break;
845 case Instruction::ICmp:
846 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
847 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
848 break;
849 default:
850 llvm_unreachable("Unhandled Opcode");
851 }
852 assert(R != nullptr);
853 return replaceInstUsesWith(*I, R);
854}
855
856// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
857// IFF
// 1) the logic_shifts match
// 2) either BinOp1 is `and`, or the binop/shift pair distributes completely
//    (anything but `add` + `lshr`) and either BinOp2 is `and` or
//    (logic_shift (inv_logic_shift C1, C), C) == C1
//
863// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
864//
865// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
866// IFF
867// 1) the logic_shifts match
868// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
869//
870// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
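//    e.g. (or (or (lshr X, 4), 7), (lshr Y, 4)) -> (or (lshr (or X, Y), 4), 7)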
871//
872// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
873// IFF
874// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
875// 2) Binop2 is `not`
876//
877// -> (arithmetic_shift Binop1((not X), Y), Amt)
878
Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
  const DataLayout &DL = I.getDataLayout();
881 auto IsValidBinOpc = [](unsigned Opc) {
882 switch (Opc) {
883 default:
884 return false;
885 case Instruction::And:
886 case Instruction::Or:
887 case Instruction::Xor:
888 case Instruction::Add:
889 // Skip Sub as we only match constant masks which will canonicalize to use
890 // add.
891 return true;
892 }
893 };
894
895 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
896 // constraints.
897 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
898 unsigned ShOpc) {
899 assert(ShOpc != Instruction::AShr);
900 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
901 ShOpc == Instruction::Shl;
902 };
903
904 auto GetInvShift = [](unsigned ShOpc) {
905 assert(ShOpc != Instruction::AShr);
906 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
907 };
908
909 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
910 unsigned ShOpc, Constant *CMask,
911 Constant *CShift) {
912 // If the BinOp1 is `and` we don't need to check the mask.
913 if (BinOpc1 == Instruction::And)
914 return true;
915
916 // For all other possible transfers we need complete distributable
917 // binop/shift (anything but `add` + `lshr`).
918 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
919 return false;
920
921 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
922 // vecs, otherwise the mask will be simplified and the following check will
923 // handle it).
924 if (BinOpc2 == Instruction::And)
925 return true;
926
927 // Otherwise, need mask that meets the below requirement.
928 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
929 Constant *MaskInvShift =
930 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
931 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
932 CMask;
933 };
934
935 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
936 Constant *CMask, *CShift;
937 Value *X, *Y, *ShiftedX, *Mask, *Shift;
938 if (!match(I.getOperand(ShOpnum),
939 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
940 return nullptr;
941 if (!match(I.getOperand(1 - ShOpnum),
944 m_Value(ShiftedX)),
945 m_Value(Mask))))
946 return nullptr;
947 // Make sure we are matching instruction shifts and not ConstantExpr
948 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
949 auto *IX = dyn_cast<Instruction>(ShiftedX);
950 if (!IY || !IX)
951 return nullptr;
952
953 // LHS and RHS need same shift opcode
954 unsigned ShOpc = IY->getOpcode();
955 if (ShOpc != IX->getOpcode())
956 return nullptr;
957
958 // Make sure binop is real instruction and not ConstantExpr
959 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
960 if (!BO2)
961 return nullptr;
962
963 unsigned BinOpc = BO2->getOpcode();
964 // Make sure we have valid binops.
965 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
966 return nullptr;
967
968 if (ShOpc == Instruction::AShr) {
969 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
970 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
971 Value *NotX = Builder.CreateNot(X);
972 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
        return BinaryOperator::Create(
            static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
975 }
976
977 return nullptr;
978 }
979
    // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
    // distribute to drop the shift, regardless of the constants.
982 if (BinOpc == I.getOpcode() &&
983 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
984 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
985 Value *NewBinOp1 = Builder.CreateBinOp(
986 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
987 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
988 }
989
990 // Otherwise we can only distribute by constant shifting the mask, so
991 // ensure we have constants.
992 if (!match(Shift, m_ImmConstant(CShift)))
993 return nullptr;
994 if (!match(Mask, m_ImmConstant(CMask)))
995 return nullptr;
996
997 // Check if we can distribute the binops.
998 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
999 return nullptr;
1000
1001 Constant *NewCMask =
1002 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1003 Value *NewBinOp2 = Builder.CreateBinOp(
1004 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1005 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1006 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1007 NewBinOp1, CShift);
1008 };
1009
1010 if (Instruction *R = MatchBinOp(0))
1011 return R;
1012 return MatchBinOp(1);
1013}
1014
1015// (Binop (zext C), (select C, T, F))
1016// -> (select C, (binop 1, T), (binop 0, F))
1017//
1018// (Binop (sext C), (select C, T, F))
1019// -> (select C, (binop -1, T), (binop 0, F))
1020//
1021// Attempt to simplify binary operations into a select with folded args, when
1022// one operand of the binop is a select instruction and the other operand is a
1023// zext/sext extension, whose value is the select condition.
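// For example, (add (zext i1 %c to i32), (select i1 %c, i32 5, i32 9))
//           -> (select i1 %c, i32 6, i32 9)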
Instruction *InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(
    BinaryOperator &I) {
  // TODO: this simplification may be extended to any speculatable instruction,
1027 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1028 Instruction::BinaryOps Opc = I.getOpcode();
1029 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1030 Value *A, *CondVal, *TrueVal, *FalseVal;
1031 Value *CastOp;
1032
1033 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1034 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1035 A->getType()->getScalarSizeInBits() == 1 &&
1036 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1037 m_Value(FalseVal)));
1038 };
1039
  // Make sure one side of the binop is a select instruction, and the other is
  // a zero/sign extension operating on an i1.
1042 if (MatchSelectAndCast(LHS, RHS))
1043 CastOp = LHS;
1044 else if (MatchSelectAndCast(RHS, LHS))
1045 CastOp = RHS;
1046 else
1047 return nullptr;
1048
1049 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1050 bool IsCastOpRHS = (CastOp == RHS);
1051 bool IsZExt = isa<ZExtInst>(CastOp);
1052 Constant *C;
1053
1054 if (IsTrueArm) {
1055 C = Constant::getNullValue(V->getType());
1056 } else if (IsZExt) {
1057 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1058 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1059 } else {
1060 C = Constant::getAllOnesValue(V->getType());
1061 }
1062
1063 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1064 : Builder.CreateBinOp(Opc, C, V);
1065 };
1066
  // If the value used in the zext/sext is the select condition, or the
  // negation of the select condition, the binop can be simplified.
1069 if (CondVal == A) {
1070 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1071 return SelectInst::Create(CondVal, NewTrueVal,
1072 NewFoldedConst(true, FalseVal));
1073 }
1074
1075 if (match(A, m_Not(m_Specific(CondVal)))) {
1076 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1077 return SelectInst::Create(CondVal, NewTrueVal,
1078 NewFoldedConst(false, FalseVal));
1079 }
1080
1081 return nullptr;
1082}
1083
Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1086 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1087 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1088 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1089 Value *A, *B, *C, *D;
1090 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1091
1092 if (Op0)
1093 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1094 if (Op1)
1095 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1096
1097 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1098 // a common term.
1099 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1100 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1101 return V;
1102
1103 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1104 // term.
1105 if (Op0)
1106 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1107 if (Value *V =
1108 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1109 return V;
1110
1111 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1112 // term.
1113 if (Op1)
1114 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1115 if (Value *V =
1116 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1117 return V;
1118
1119 return nullptr;
1120}
1121
/// This tries to simplify binary operations which some other binary operation
/// distributes over, either by factorizing out common terms
/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1129 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1130 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1131 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1132
1133 // Factorization.
1134 if (Value *R = tryFactorizationFolds(I))
1135 return R;
1136
1137 // Expansion.
1138 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1139 // The instruction has the form "(A op' B) op C". See if expanding it out
1140 // to "(A op C) op' (B op C)" results in simplifications.
1141 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1142 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1143
1144 // Disable the use of undef because it's not safe to distribute undef.
1145 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1146 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1147 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1148
1149 // Do "A op C" and "B op C" both simplify?
1150 if (L && R) {
1151 // They do! Return "L op' R".
1152 ++NumExpand;
1153 C = Builder.CreateBinOp(InnerOpcode, L, R);
1154 C->takeName(&I);
1155 return C;
1156 }
1157
1158 // Does "A op C" simplify to the identity value for the inner opcode?
1159 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1160 // They do! Return "B op C".
1161 ++NumExpand;
1162 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1163 C->takeName(&I);
1164 return C;
1165 }
1166
1167 // Does "B op C" simplify to the identity value for the inner opcode?
1168 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1169 // They do! Return "A op C".
1170 ++NumExpand;
1171 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1172 C->takeName(&I);
1173 return C;
1174 }
1175 }
1176
1177 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1178 // The instruction has the form "A op (B op' C)". See if expanding it out
1179 // to "(A op B) op' (A op C)" results in simplifications.
1180 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1181 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1182
1183 // Disable the use of undef because it's not safe to distribute undef.
1184 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1185 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1186 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1187
1188 // Do "A op B" and "A op C" both simplify?
1189 if (L && R) {
1190 // They do! Return "L op' R".
1191 ++NumExpand;
1192 A = Builder.CreateBinOp(InnerOpcode, L, R);
1193 A->takeName(&I);
1194 return A;
1195 }
1196
1197 // Does "A op B" simplify to the identity value for the inner opcode?
1198 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1199 // They do! Return "A op C".
1200 ++NumExpand;
1201 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1202 A->takeName(&I);
1203 return A;
1204 }
1205
1206 // Does "A op C" simplify to the identity value for the inner opcode?
1207 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1208 // They do! Return "A op B".
1209 ++NumExpand;
1210 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1211 A->takeName(&I);
1212 return A;
1213 }
1214 }
1215
  return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
}
1218
1219static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
  if (LHS->getParent() != RHS->getParent())
1222 return std::nullopt;
1223
1224 if (LHS->getNumIncomingValues() < 2)
1225 return std::nullopt;
1226
1227 if (!equal(LHS->blocks(), RHS->blocks()))
1228 return std::nullopt;
1229
1230 Value *L0 = LHS->getIncomingValue(0);
1231 Value *R0 = RHS->getIncomingValue(0);
1232
1233 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1234 Value *L1 = LHS->getIncomingValue(I);
1235 Value *R1 = RHS->getIncomingValue(I);
1236
1237 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1238 continue;
1239
1240 return std::nullopt;
1241 }
1242
1243 return std::optional(std::pair(L0, R0));
1244}
1245
1246std::optional<std::pair<Value *, Value *>>
1247InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1248 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1249 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1250 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1251 return std::nullopt;
1252 switch (LHSInst->getOpcode()) {
1253 case Instruction::PHI:
1254 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1255 case Instruction::Select: {
1256 Value *Cond = LHSInst->getOperand(0);
1257 Value *TrueVal = LHSInst->getOperand(1);
1258 Value *FalseVal = LHSInst->getOperand(2);
1259 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1260 FalseVal == RHSInst->getOperand(1))
1261 return std::pair(TrueVal, FalseVal);
1262 return std::nullopt;
1263 }
1264 case Instruction::Call: {
1265 // Match min(a, b) and max(a, b)
1266 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1267 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1268 if (LHSMinMax && RHSMinMax &&
1269 LHSMinMax->getPredicate() ==
            ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
        ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1272 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1273 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1274 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1275 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1276 return std::nullopt;
1277 }
1278 default:
1279 return std::nullopt;
1280 }
1281}
1282
Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
1285 Value *RHS) {
1286 Value *A, *B, *C, *D, *E, *F;
1287 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1288 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1289 if (!LHSIsSelect && !RHSIsSelect)
1290 return nullptr;
1291
1292 FastMathFlags FMF;
  BuilderTy::FastMathFlagGuard Guard(Builder);
  if (isa<FPMathOperator>(&I)) {
    FMF = I.getFastMathFlags();
    Builder.setFastMathFlags(FMF);
  }

  Instruction::BinaryOps Opcode = I.getOpcode();
  SimplifyQuery Q = SQ.getWithInstruction(&I);

1302 Value *Cond, *True = nullptr, *False = nullptr;
1303
1304 // Special-case for add/negate combination. Replace the zero in the negation
1305 // with the trailing add operand:
1306 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1307 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1308 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1309 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1310 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1311 return nullptr;
1312
1313 Value *N;
1314 if (True && match(FVal, m_Neg(m_Value(N)))) {
1315 Value *Sub = Builder.CreateSub(Z, N);
1316 return Builder.CreateSelect(Cond, True, Sub, I.getName());
1317 }
1318 if (False && match(TVal, m_Neg(m_Value(N)))) {
1319 Value *Sub = Builder.CreateSub(Z, N);
1320 return Builder.CreateSelect(Cond, Sub, False, I.getName());
1321 }
1322 return nullptr;
1323 };
1324
1325 if (LHSIsSelect && RHSIsSelect && A == D) {
1326 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1327 Cond = A;
1328 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1329 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1330
1331 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1332 if (False && !True)
1333 True = Builder.CreateBinOp(Opcode, B, E);
1334 else if (True && !False)
1335 False = Builder.CreateBinOp(Opcode, C, F);
1336 }
1337 } else if (LHSIsSelect && LHS->hasOneUse()) {
1338 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1339 Cond = A;
1340 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1341 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1342 if (Value *NewSel = foldAddNegate(B, C, RHS))
1343 return NewSel;
1344 } else if (RHSIsSelect && RHS->hasOneUse()) {
1345 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1346 Cond = D;
1347 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1348 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1349 if (Value *NewSel = foldAddNegate(E, F, LHS))
1350 return NewSel;
1351 }
1352
1353 if (!True || !False)
1354 return nullptr;
1355
1356 Value *SI = Builder.CreateSelect(Cond, True, False);
1357 SI->takeName(&I);
1358 return SI;
1359}
1360
/// Freely adapt every user of V as if V was changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
  assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1365 for (User *U : make_early_inc_range(I->users())) {
1366 if (U == IgnoredUser)
1367 continue; // Don't consider this user.
1368 switch (cast<Instruction>(U)->getOpcode()) {
1369 case Instruction::Select: {
1370 auto *SI = cast<SelectInst>(U);
1371 SI->swapValues();
1372 SI->swapProfMetadata();
1373 break;
1374 }
1375 case Instruction::Br: {
1376 BranchInst *BI = cast<BranchInst>(U);
1377 BI->swapSuccessors(); // swaps prof metadata too
1378 if (BPI)
        BPI->swapSuccEdgesProbabilities(BI->getParent());
      break;
1381 }
1382 case Instruction::Xor:
1383 replaceInstUsesWith(cast<Instruction>(*U), I);
1384 // Add to worklist for DCE.
1385 addToWorklist(cast<Instruction>(U));
1386 break;
1387 default:
1388 llvm_unreachable("Got unexpected user - out of sync with "
1389 "canFreelyInvertAllUsersOf() ?");
1390 }
1391 }
1392}
1393
1394/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1395/// constant zero (which is the 'negate' form).
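/// For example, "sub i32 0, %x" yields %x, and the constant i32 7 yields
/// i32 -7.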
1396Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1397 Value *NegV;
1398 if (match(V, m_Neg(m_Value(NegV))))
1399 return NegV;
1400
1401 // Constants can be considered to be negated values if they can be folded.
1402 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1403 return ConstantExpr::getNeg(C);
1404
1405 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1406 if (C->getType()->getElementType()->isIntegerTy())
1407 return ConstantExpr::getNeg(C);
1408
1409 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1410 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1411 Constant *Elt = CV->getAggregateElement(i);
1412 if (!Elt)
1413 return nullptr;
1414
1415 if (isa<UndefValue>(Elt))
1416 continue;
1417
1418 if (!isa<ConstantInt>(Elt))
1419 return nullptr;
1420 }
1421 return ConstantExpr::getNeg(CV);
1422 }
1423
1424 // Negate integer vector splats.
1425 if (auto *CV = dyn_cast<Constant>(V))
1426 if (CV->getType()->isVectorTy() &&
1427 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1428 return ConstantExpr::getNeg(CV);
1429
1430 return nullptr;
1431}
1432
1433// Try to fold:
1434// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1435// -> ({s|u}itofp (int_binop x, y))
1436// 2) (fp_binop ({s|u}itofp x), FpC)
1437// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1438//
1439// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1440Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1441 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
    Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {

1444 Type *FPTy = BO.getType();
1445 Type *IntTy = IntOps[0]->getType();
1446
1447 unsigned IntSz = IntTy->getScalarSizeInBits();
  // This is the maximum number of in-use bits in the integer for which the
  // int -> fp casts are exact.
1450 unsigned MaxRepresentableBits =
      APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());

  // Preserve known number of leading bits. This can allow us to perform
  // trivial nsw/nuw checks later on.
1455 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1456
  // NB: This only comes up if OpsFromSigned is true, so there is no need to
  // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1459 auto IsNonZero = [&](unsigned OpNo) -> bool {
1460 if (OpsKnown[OpNo].hasKnownBits() &&
1461 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1462 return true;
1463 return isKnownNonZero(IntOps[OpNo], SQ);
1464 };
1465
1466 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1467 // NB: This matches the impl in ValueTracking, we just try to use cached
1468 // knownbits here. If we ever start supporting WithCache for
1469 // `isKnownNonNegative`, change this to an explicit call.
1470 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1471 };
1472
1473 // Check if we know for certain that ({s|u}itofp op) is exact.
1474 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1475 // Can we treat this operand as the desired sign?
1476 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1477 !IsNonNeg(OpNo))
1478 return false;
1479
    // If fp precision >= bitwidth(op) then it's exact.
1481 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1482 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1483 // handled specially. We can't, however, increase the bound arbitrarily for
1484 // `sitofp` as for larger sizes, it won't sign extend.
1485 if (MaxRepresentableBits < IntSz) {
      // Otherwise, if it's a signed cast, check that fp precision >=
      // bitwidth(op) - numSignBits(op).
1488 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1489 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1490 if (OpsFromSigned)
1491 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1492 // Finally for unsigned check that fp precision >= bitwidth(op) -
1493 // numLeadingZeros(op).
1494 else {
1495 NumUsedLeadingBits[OpNo] =
1496 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1497 }
1498 }
    // NB: We could also check if op is known to be a power of 2 or zero (which
    // will always be representable). It's unlikely, however, that if we are
    // unable to bound op in any way, we will be able to pass the overflow
    // checks later on.
1503
1504 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1505 return false;
1506 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1507 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1508 IsNonZero(OpNo);
1509 };
1510
1511 // If we have a constant rhs, see if we can losslessly convert it to an int.
1512 if (Op1FpC != nullptr) {
1513 // Signed + Mul req non-zero
1514 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1515 !match(Op1FpC, m_NonZeroFP()))
1516 return nullptr;
1517
    Constant *Op1IntC = ConstantFoldCastOperand(
        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1520 IntTy, DL);
1521 if (Op1IntC == nullptr)
1522 return nullptr;
1523 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1524 : Instruction::UIToFP,
1525 Op1IntC, FPTy, DL) != Op1FpC)
1526 return nullptr;
1527
1528 // First try to keep sign of cast the same.
1529 IntOps[1] = Op1IntC;
1530 }
1531
1532 // Ensure lhs/rhs integer types match.
1533 if (IntTy != IntOps[1]->getType())
1534 return nullptr;
1535
1536 if (Op1FpC == nullptr) {
1537 if (!IsValidPromotion(1))
1538 return nullptr;
1539 }
1540 if (!IsValidPromotion(0))
1541 return nullptr;
1542
  // Finally, check that the integer version of the binop will not overflow.
  Instruction::BinaryOps IntOpc;
  // Because of the precision check, we can often rule out overflows.
1546 bool NeedsOverflowCheck = true;
1547 // Try to conservatively rule out overflow based on the already done precision
1548 // checks.
1549 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1550 unsigned OverflowMaxCurBits =
1551 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1552 bool OutputSigned = OpsFromSigned;
1553 switch (BO.getOpcode()) {
1554 case Instruction::FAdd:
1555 IntOpc = Instruction::Add;
1556 OverflowMaxOutputBits += OverflowMaxCurBits;
1557 break;
1558 case Instruction::FSub:
1559 IntOpc = Instruction::Sub;
1560 OverflowMaxOutputBits += OverflowMaxCurBits;
1561 break;
1562 case Instruction::FMul:
1563 IntOpc = Instruction::Mul;
1564 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1565 break;
1566 default:
1567 llvm_unreachable("Unsupported binop");
1568 }
1569 // The precision check may have already ruled out overflow.
1570 if (OverflowMaxOutputBits < IntSz) {
1571 NeedsOverflowCheck = false;
    // We can bound unsigned overflow from sub to an in-range signed value
    // (this is what allows us to avoid the overflow check for sub).
1574 if (IntOpc == Instruction::Sub)
1575 OutputSigned = true;
1576 }
1577
  // The precision check did not rule out overflow, so we need to check.
1579 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1580 // `IntOps[...]` arguments to `KnownOps[...]`.
1581 if (NeedsOverflowCheck &&
1582 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1583 return nullptr;
1584
1585 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1586 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1587 IntBO->setHasNoSignedWrap(OutputSigned);
1588 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1589 }
1590 if (OutputSigned)
1591 return new SIToFPInst(IntBinOp, FPTy);
1592 return new UIToFPInst(IntBinOp, FPTy);
1593}
1594
1595// Try to fold:
1596// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1597// -> ({s|u}itofp (int_binop x, y))
1598// 2) (fp_binop ({s|u}itofp x), FpC)
1599// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1600Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1601 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1602 Constant *Op1FpC = nullptr;
1603 // Check for:
1604 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1605 // 2) (binop ({s|u}itofp x), FpC)
1606 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1607 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1608 return nullptr;
1609
1610 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1611 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1612 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1613 return nullptr;
1614
1615 // Cache KnownBits a bit to potentially save some analysis.
1616 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1617
1618 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1619 // different constraints depending on the sign of the cast.
1620 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1621 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1622 IntOps, Op1FpC, OpsKnown))
1623 return R;
1624 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1625 Op1FpC, OpsKnown);
1626}
1627
1628/// A binop with a constant operand and a sign-extended boolean operand may be
1629/// converted into a select of constants by applying the binary operation to
1630/// the constant with the two possible values of the extended boolean (0 or -1).
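/// For example, (and (sext i1 %b to i32), 12) becomes
/// (select i1 %b, i32 12, i32 0).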
1631Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1632 // TODO: Handle non-commutative binop (constant is operand 0).
1633 // TODO: Handle zext.
1634 // TODO: Peek through 'not' of cast.
1635 Value *BO0 = BO.getOperand(0);
1636 Value *BO1 = BO.getOperand(1);
1637 Value *X;
1638 Constant *C;
1639 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1640 !X->getType()->isIntOrIntVectorTy(1))
1641 return nullptr;
1642
1643   // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1644   Constant *Ones = Constant::getAllOnesValue(BO.getType());
1645   Constant *Zero = Constant::getNullValue(BO.getType());
1646 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1647 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1648 return SelectInst::Create(X, TVal, FVal);
1649}
1650
1651 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1652                                                  bool IsTrueArm) {
1653   SmallVector<Value *> Ops;
1654 for (Value *Op : I.operands()) {
1655 Value *V = nullptr;
1656 if (Op == SI) {
1657 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1658 } else if (match(SI->getCondition(),
1661 m_Specific(Op), m_Value(V))) &&
1663 // Pass
1664 } else {
1665 V = Op;
1666 }
1667 Ops.push_back(V);
1668 }
1669
1670 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1671}
1672
1673 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1674                                              Value *NewOp, InstCombiner &IC) {
1675 Instruction *Clone = I.clone();
1676 Clone->replaceUsesOfWith(SI, NewOp);
1677   Clone->dropUBImplyingAttrsAndMetadata();
1678   IC.InsertNewInstBefore(Clone, I.getIterator());
1679 return Clone;
1680}
1681
1682 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1683                                                 bool FoldWithMultiUse) {
1684   // Don't modify shared select instructions unless FoldWithMultiUse is set.
1685 if (!SI->hasOneUse() && !FoldWithMultiUse)
1686 return nullptr;
1687
1688 Value *TV = SI->getTrueValue();
1689 Value *FV = SI->getFalseValue();
1690
1691 // Bool selects with constant operands can be folded to logical ops.
1692 if (SI->getType()->isIntOrIntVectorTy(1))
1693 return nullptr;
1694
1695   // Test if an FCmpInst instruction is used exclusively by a select as
1696 // part of a minimum or maximum operation. If so, refrain from doing
1697 // any other folding. This helps out other analyses which understand
1698 // non-obfuscated minimum and maximum idioms. And in this case, at
1699 // least one of the comparison operands has at least one user besides
1700 // the compare (the select), which would often largely negate the
1701 // benefit of folding anyway.
1702 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1703 if (CI->hasOneUse()) {
1704 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1705 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1706 return nullptr;
1707 }
1708 }
1709
1710 // Make sure that one of the select arms folds successfully.
1711 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1712 Value *NewFV =
1713 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1714 if (!NewTV && !NewFV)
1715 return nullptr;
1716
1717 // Create an instruction for the arm that did not fold.
1718 if (!NewTV)
1719 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1720 if (!NewFV)
1721 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1722 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1723}
1724
1725 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1726                                          Value *InValue, BasicBlock *InBB,
1727 const DataLayout &DL,
1728 const SimplifyQuery SQ) {
1729 // NB: It is a precondition of this transform that the operands be
1730 // phi translatable!
1731   SmallVector<Value *> Ops;
1732   for (Value *Op : I.operands()) {
1733 if (Op == PN)
1734 Ops.push_back(InValue);
1735 else
1736 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1737 }
1738
1739 // Don't consider the simplification successful if we get back a constant
1740 // expression. That's just an instruction in hiding.
1741 // Also reject the case where we simplify back to the phi node. We wouldn't
1742 // be able to remove it in that case.
1743   Value *NewVal = simplifyInstructionWithOperands(
1744       &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1745 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1746 return NewVal;
1747
1748 // Check if incoming PHI value can be replaced with constant
1749 // based on implied condition.
1750 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1751 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1752 if (TerminatorBI && TerminatorBI->isConditional() &&
1753 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1754 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1755 std::optional<bool> ImpliedCond = isImpliedCondition(
1756 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1757 DL, LHSIsTrue);
1758 if (ImpliedCond)
1759 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1760 }
1761
1762 return nullptr;
1763}
1764
1765 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1766                                              bool AllowMultipleUses) {
1767 unsigned NumPHIValues = PN->getNumIncomingValues();
1768 if (NumPHIValues == 0)
1769 return nullptr;
1770
1771 // We normally only transform phis with a single use. However, if a PHI has
1772 // multiple uses and they are all the same operation, we can fold *all* of the
1773 // uses into the PHI.
1774 bool OneUse = PN->hasOneUse();
1775 bool IdenticalUsers = false;
1776 if (!AllowMultipleUses && !OneUse) {
1777 // Walk the use list for the instruction, comparing them to I.
1778 for (User *U : PN->users()) {
1779 Instruction *UI = cast<Instruction>(U);
1780 if (UI != &I && !I.isIdenticalTo(UI))
1781 return nullptr;
1782 }
1783 // Otherwise, we can replace *all* users with the new PHI we form.
1784 IdenticalUsers = true;
1785 }
1786
1787 // Check that all operands are phi-translatable.
1788 for (Value *Op : I.operands()) {
1789 if (Op == PN)
1790 continue;
1791
1792 // Non-instructions never require phi-translation.
1793 auto *I = dyn_cast<Instruction>(Op);
1794 if (!I)
1795 continue;
1796
1797 // Phi-translate can handle phi nodes in the same block.
1798 if (isa<PHINode>(I))
1799 if (I->getParent() == PN->getParent())
1800 continue;
1801
1802 // Operand dominates the block, no phi-translation necessary.
1803 if (DT.dominates(I, PN->getParent()))
1804 continue;
1805
1806 // Not phi-translatable, bail out.
1807 return nullptr;
1808 }
1809
1810 // Check to see whether the instruction can be folded into each phi operand.
1811 // If there is one operand that does not fold, remember the BB it is in.
1812 SmallVector<Value *> NewPhiValues;
1813 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1814 bool SeenNonSimplifiedInVal = false;
1815 for (unsigned i = 0; i != NumPHIValues; ++i) {
1816 Value *InVal = PN->getIncomingValue(i);
1817 BasicBlock *InBB = PN->getIncomingBlock(i);
1818
1819 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1820 NewPhiValues.push_back(NewVal);
1821 continue;
1822 }
1823
1824 // Handle some cases that can't be fully simplified, but where we know that
1825 // the two instructions will fold into one.
1826 auto WillFold = [&]() {
1827 if (!InVal->hasOneUser())
1828 return false;
1829
1830 // icmp of ucmp/scmp with constant will fold to icmp.
1831 const APInt *Ignored;
1832 if (isa<CmpIntrinsic>(InVal) &&
1833 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1834 return true;
1835
1836 // icmp eq zext(bool), 0 will fold to !bool.
1837 if (isa<ZExtInst>(InVal) &&
1838 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1839 match(&I,
1841 return true;
1842
1843 return false;
1844 };
1845
1846 if (WillFold()) {
1847 OpsToMoveUseToIncomingBB.push_back(i);
1848 NewPhiValues.push_back(nullptr);
1849 continue;
1850 }
1851
1852 if (!OneUse && !IdenticalUsers)
1853 return nullptr;
1854
1855 if (SeenNonSimplifiedInVal)
1856 return nullptr; // More than one non-simplified value.
1857 SeenNonSimplifiedInVal = true;
1858
1859 // If there is exactly one non-simplified value, we can insert a copy of the
1860 // operation in that block. However, if this is a critical edge, we would
1861 // be inserting the computation on some other paths (e.g. inside a loop).
1862 // Only do this if the pred block is unconditionally branching into the phi
1863 // block. Also, make sure that the pred block is not dead code.
1864 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1865 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1866 return nullptr;
1867
1868 NewPhiValues.push_back(nullptr);
1869 OpsToMoveUseToIncomingBB.push_back(i);
1870
1871 // If the InVal is an invoke at the end of the pred block, then we can't
1872 // insert a computation after it without breaking the edge.
1873 if (isa<InvokeInst>(InVal))
1874 if (cast<Instruction>(InVal)->getParent() == InBB)
1875 return nullptr;
1876
1877 // Do not push the operation across a loop backedge. This could result in
1878 // an infinite combine loop, and is generally non-profitable (especially
1879 // if the operation was originally outside the loop).
1880 if (isBackEdge(InBB, PN->getParent()))
1881 return nullptr;
1882 }
1883
1884 // Clone the instruction that uses the phi node and move it into the incoming
1885 // BB because we know that the next iteration of InstCombine will simplify it.
1886   SmallDenseMap<BasicBlock *, Instruction *> Clones;
1887   for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1888     Value *Op = PN->getIncomingValue(OpIndex);
1889     BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1890
1891 Instruction *Clone = Clones.lookup(OpBB);
1892 if (!Clone) {
1893 Clone = I.clone();
1894 for (Use &U : Clone->operands()) {
1895 if (U == PN)
1896 U = Op;
1897 else
1898 U = U->DoPHITranslation(PN->getParent(), OpBB);
1899 }
1900 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1901 Clones.insert({OpBB, Clone});
1902 }
1903
1904 NewPhiValues[OpIndex] = Clone;
1905 }
1906
1907 // Okay, we can do the transformation: create the new PHI node.
1908 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1909 InsertNewInstBefore(NewPN, PN->getIterator());
1910 NewPN->takeName(PN);
1911 NewPN->setDebugLoc(PN->getDebugLoc());
1912
1913 for (unsigned i = 0; i != NumPHIValues; ++i)
1914 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1915
1916 if (IdenticalUsers) {
1917 for (User *U : make_early_inc_range(PN->users())) {
1918 Instruction *User = cast<Instruction>(U);
1919 if (User == &I)
1920 continue;
1921       replaceInstUsesWith(*User, NewPN);
1922       eraseInstFromFunction(*User);
1923 }
1924 OneUse = true;
1925 }
1926
1927 if (OneUse) {
1928 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1929 const_cast<PHINode &>(*NewPN),
1930 const_cast<PHINode &>(*PN), DT);
1931 }
1932 return replaceInstUsesWith(I, NewPN);
1933}
1934
1935 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1936   // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1937 // we are guarding against replicating the binop in >1 predecessor.
1938 // This could miss matching a phi with 2 constant incoming values.
1939 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1940 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1941 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1942 Phi0->getNumOperands() != Phi1->getNumOperands())
1943 return nullptr;
1944
1945 // TODO: Remove the restriction for binop being in the same block as the phis.
1946 if (BO.getParent() != Phi0->getParent() ||
1947 BO.getParent() != Phi1->getParent())
1948 return nullptr;
1949
1950 // Fold if there is at least one specific constant value in phi0 or phi1's
1951 // incoming values that comes from the same block and this specific constant
1952 // value can be used to do optimization for specific binary operator.
1953 // For example:
1954 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1955 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1956 // %add = add i32 %phi0, %phi1
1957 // ==>
1958 // %add = phi i32 [%j, %bb0], [%i, %bb1]
1959   Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
1960                                                /*AllowRHSConstant*/ false);
1961 if (C) {
1962 SmallVector<Value *, 4> NewIncomingValues;
1963 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1964 auto &Phi0Use = std::get<0>(T);
1965 auto &Phi1Use = std::get<1>(T);
1966 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1967 return false;
1968 Value *Phi0UseV = Phi0Use.get();
1969 Value *Phi1UseV = Phi1Use.get();
1970 if (Phi0UseV == C)
1971 NewIncomingValues.push_back(Phi1UseV);
1972 else if (Phi1UseV == C)
1973 NewIncomingValues.push_back(Phi0UseV);
1974 else
1975 return false;
1976 return true;
1977 };
1978
1979 if (all_of(zip(Phi0->operands(), Phi1->operands()),
1980 CanFoldIncomingValuePair)) {
1981 PHINode *NewPhi =
1982 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1983 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1984 "The number of collected incoming values should equal the number "
1985 "of the original PHINode operands!");
1986 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1987 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1988 return NewPhi;
1989 }
1990 }
1991
1992 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1993 return nullptr;
1994
1995 // Match a pair of incoming constants for one of the predecessor blocks.
1996 BasicBlock *ConstBB, *OtherBB;
1997 Constant *C0, *C1;
1998 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
1999 ConstBB = Phi0->getIncomingBlock(0);
2000 OtherBB = Phi0->getIncomingBlock(1);
2001 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2002 ConstBB = Phi0->getIncomingBlock(1);
2003 OtherBB = Phi0->getIncomingBlock(0);
2004 } else {
2005 return nullptr;
2006 }
2007 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2008 return nullptr;
2009
2010 // The block that we are hoisting to must reach here unconditionally.
2011 // Otherwise, we could be speculatively executing an expensive or
2012 // non-speculative op.
2013 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2014 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2015 !DT.isReachableFromEntry(OtherBB))
2016 return nullptr;
2017
2018 // TODO: This check could be tightened to only apply to binops (div/rem) that
2019 // are not safe to speculatively execute. But that could allow hoisting
2020 // potentially expensive instructions (fdiv for example).
2021 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2022     if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2023       return nullptr;
2024
2025 // Fold constants for the predecessor block with constant incoming values.
2026 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2027 if (!NewC)
2028 return nullptr;
2029
2030 // Make a new binop in the predecessor block with the non-constant incoming
2031 // values.
2032 Builder.SetInsertPoint(PredBlockBranch);
2033 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2034 Phi0->getIncomingValueForBlock(OtherBB),
2035 Phi1->getIncomingValueForBlock(OtherBB));
2036 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2037 NotFoldedNewBO->copyIRFlags(&BO);
2038
2039 // Replace the binop with a phi of the new values. The old phis are dead.
2040 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2041 NewPhi->addIncoming(NewBO, OtherBB);
2042 NewPhi->addIncoming(NewC, ConstBB);
2043 return NewPhi;
2044}
2045
2046 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2047   if (!isa<Constant>(I.getOperand(1)))
2048 return nullptr;
2049
2050 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2051 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2052 return NewSel;
2053 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2054 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2055 return NewPhi;
2056 }
2057 return nullptr;
2058}
2059
2060 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2061   // If this GEP has only 0 indices, it is the same pointer as
2062 // Src. If Src is not a trivial GEP too, don't combine
2063 // the indices.
2064 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2065 !Src.hasOneUse())
2066 return false;
2067 return true;
2068}
2069
2070 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2071   if (!isa<VectorType>(Inst.getType()))
2072 return nullptr;
2073
2074 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2075 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2076 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2077 cast<VectorType>(Inst.getType())->getElementCount());
2078 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2079 cast<VectorType>(Inst.getType())->getElementCount());
2080
2081 // If both operands of the binop are vector concatenations, then perform the
2082 // narrow binop on each pair of the source operands followed by concatenation
2083 // of the results.
2084 Value *L0, *L1, *R0, *R1;
2085 ArrayRef<int> Mask;
2086 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2087 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2088 LHS->hasOneUse() && RHS->hasOneUse() &&
2089 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2090 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2091 // This transform does not have the speculative execution constraint as
2092 // below because the shuffle is a concatenation. The new binops are
2093 // operating on exactly the same elements as the existing binop.
2094 // TODO: We could ease the mask requirement to allow different undef lanes,
2095 // but that requires an analysis of the binop-with-undef output value.
2096 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2097 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2098 BO->copyIRFlags(&Inst);
2099 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2100 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2101 BO->copyIRFlags(&Inst);
2102 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2103 }
2104
2105 auto createBinOpReverse = [&](Value *X, Value *Y) {
2106 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2107 if (auto *BO = dyn_cast<BinaryOperator>(V))
2108 BO->copyIRFlags(&Inst);
2109 Module *M = Inst.getModule();
2111 M, Intrinsic::vector_reverse, V->getType());
2112 return CallInst::Create(F, V);
2113 };
2114
2115 // NOTE: Reverse shuffles don't require the speculative execution protection
2116 // below because they don't affect which lanes take part in the computation.
2117
2118 Value *V1, *V2;
2119 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2120 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2121 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2122 (LHS->hasOneUse() || RHS->hasOneUse() ||
2123 (LHS == RHS && LHS->hasNUses(2))))
2124 return createBinOpReverse(V1, V2);
2125
2126     // Op(rev(V1), RHSSplat) -> rev(Op(V1, RHSSplat))
2127 if (LHS->hasOneUse() && isSplatValue(RHS))
2128 return createBinOpReverse(V1, RHS);
2129 }
2130 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2131 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2132 return createBinOpReverse(LHS, V2);
2133
2134 // It may not be safe to reorder shuffles and things like div, urem, etc.
2135 // because we may trap when executing those ops on unknown vector elements.
2136 // See PR20059.
2137   if (!isSafeToSpeculativelyExecute(&Inst))
2138     return nullptr;
2139
2140 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2141 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2142 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2143 BO->copyIRFlags(&Inst);
2144 return new ShuffleVectorInst(XY, M);
2145 };
2146
2147 // If both arguments of the binary operation are shuffles that use the same
2148 // mask and shuffle within a single vector, move the shuffle after the binop.
2149 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2150 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2151 V1->getType() == V2->getType() &&
2152 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2153 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2154 return createBinOpShuffle(V1, V2, Mask);
2155 }
2156
2157 // If both arguments of a commutative binop are select-shuffles that use the
2158 // same mask with commuted operands, the shuffles are unnecessary.
2159 if (Inst.isCommutative() &&
2160 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2161 match(RHS,
2162 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2163 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2164 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2165 // TODO: Allow shuffles that contain undefs in the mask?
2166 // That is legal, but it reduces undef knowledge.
2167 // TODO: Allow arbitrary shuffles by shuffling after binop?
2168 // That might be legal, but we have to deal with poison.
2169 if (LShuf->isSelect() &&
2170 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2171 RShuf->isSelect() &&
2172 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2173 // Example:
2174 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2175 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2176 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2177 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2178 NewBO->copyIRFlags(&Inst);
2179 return NewBO;
2180 }
2181 }
2182
2183 // If one argument is a shuffle within one vector and the other is a constant,
2184 // try moving the shuffle after the binary operation. This canonicalization
2185 // intends to move shuffles closer to other shuffles and binops closer to
2186 // other binops, so they can be folded. It may also enable demanded elements
2187 // transforms.
2188 Constant *C;
2189 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
2190 if (InstVTy &&
2191       match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2192                                                 m_Mask(Mask))),
2193 m_ImmConstant(C))) &&
2194 cast<FixedVectorType>(V1->getType())->getNumElements() <=
2195 InstVTy->getNumElements()) {
2196 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
2197 "Shuffle should not change scalar type");
2198
2199 // Find constant NewC that has property:
2200 // shuffle(NewC, ShMask) = C
2201 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>)
2202 // reorder is not possible. A 1-to-1 mapping is not required. Example:
2203 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
2204 bool ConstOp1 = isa<Constant>(RHS);
2205 ArrayRef<int> ShMask = Mask;
2206 unsigned SrcVecNumElts =
2207 cast<FixedVectorType>(V1->getType())->getNumElements();
2208 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2209 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
2210 bool MayChange = true;
2211 unsigned NumElts = InstVTy->getNumElements();
2212 for (unsigned I = 0; I < NumElts; ++I) {
2213 Constant *CElt = C->getAggregateElement(I);
2214 if (ShMask[I] >= 0) {
2215 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2216 Constant *NewCElt = NewVecC[ShMask[I]];
2217 // Bail out if:
2218 // 1. The constant vector contains a constant expression.
2219 // 2. The shuffle needs an element of the constant vector that can't
2220 // be mapped to a new constant vector.
2221 // 3. This is a widening shuffle that copies elements of V1 into the
2222 // extended elements (extending with poison is allowed).
2223 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2224 I >= SrcVecNumElts) {
2225 MayChange = false;
2226 break;
2227 }
2228 NewVecC[ShMask[I]] = CElt;
2229 }
2230 // If this is a widening shuffle, we must be able to extend with poison
2231 // elements. If the original binop does not produce a poison in the high
2232 // lanes, then this transform is not safe.
2233 // Similarly for poison lanes due to the shuffle mask, we can only
2234 // transform binops that preserve poison.
2235 // TODO: We could shuffle those non-poison constant values into the
2236     // result by using a constant vector (rather than a poison vector)
2237 // as operand 1 of the new binop, but that might be too aggressive
2238 // for target-independent shuffle creation.
2239 if (I >= SrcVecNumElts || ShMask[I] < 0) {
2240 Constant *MaybePoison =
2241 ConstOp1
2242 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
2243 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
2244 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2245 MayChange = false;
2246 break;
2247 }
2248 }
2249 }
2250 if (MayChange) {
2251 Constant *NewC = ConstantVector::get(NewVecC);
2252 // It may not be safe to execute a binop on a vector with poison elements
2253 // because the entire instruction can be folded to undef or create poison
2254 // that did not exist in the original code.
2255 // TODO: The shift case should not be necessary.
2256 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
2257 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2258
2259 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2260 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2261 Value *NewLHS = ConstOp1 ? V1 : NewC;
2262 Value *NewRHS = ConstOp1 ? NewC : V1;
2263 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2264 }
2265 }
2266
2267 // Try to reassociate to sink a splat shuffle after a binary operation.
2268 if (Inst.isAssociative() && Inst.isCommutative()) {
2269 // Canonicalize shuffle operand as LHS.
2270 if (isa<ShuffleVectorInst>(RHS))
2271 std::swap(LHS, RHS);
2272
2273 Value *X;
2274 ArrayRef<int> MaskC;
2275 int SplatIndex;
2276 Value *Y, *OtherOp;
2277 if (!match(LHS,
2278 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2279 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2280 X->getType() != Inst.getType() ||
2281 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2282 return nullptr;
2283
2284 // FIXME: This may not be safe if the analysis allows undef elements. By
2285 // moving 'Y' before the splat shuffle, we are implicitly assuming
2286 // that it is not undef/poison at the splat index.
2287 if (isSplatValue(OtherOp, SplatIndex)) {
2288 std::swap(Y, OtherOp);
2289 } else if (!isSplatValue(Y, SplatIndex)) {
2290 return nullptr;
2291 }
2292
2293 // X and Y are splatted values, so perform the binary operation on those
2294 // values followed by a splat followed by the 2nd binary operation:
2295 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2296 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2297 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2298 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2299 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2300
2301 // Intersect FMF on both new binops. Other (poison-generating) flags are
2302 // dropped to be safe.
2303 if (isa<FPMathOperator>(R)) {
2304 R->copyFastMathFlags(&Inst);
2305 R->andIRFlags(RHS);
2306 }
2307 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2308 NewInstBO->copyIRFlags(R);
2309 return R;
2310 }
2311
2312 return nullptr;
2313}
2314
2315 /// Try to narrow the width of a binop if at least 1 operand is an extend of
2316 /// a value. This requires a potentially expensive known bits check to make
2317/// sure the narrow op does not overflow.
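/// For example (illustrative), if %x and %y are i8 values:
///   %a = zext i8 %x to i32
///   %b = zext i8 %y to i32
///   %s = add i32 %a, %b
/// can become 'zext (add nuw i8 %x, %y) to i32' when known bits prove the
/// narrow add cannot wrap.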
2318Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2319 // We need at least one extended operand.
2320 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2321
2322 // If this is a sub, we swap the operands since we always want an extension
2323 // on the RHS. The LHS can be an extension or a constant.
2324 if (BO.getOpcode() == Instruction::Sub)
2325 std::swap(Op0, Op1);
2326
2327 Value *X;
2328 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2329 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2330 return nullptr;
2331
2332 // If both operands are the same extension from the same source type and we
2333 // can eliminate at least one (hasOneUse), this might work.
2334 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2335 Value *Y;
2336 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2337 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2338 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2339 // If that did not match, see if we have a suitable constant operand.
2340 // Truncating and extending must produce the same constant.
2341 Constant *WideC;
2342 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2343 return nullptr;
2344 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2345 if (!NarrowC)
2346 return nullptr;
2347 Y = NarrowC;
2348 }
2349
2350 // Swap back now that we found our operands.
2351 if (BO.getOpcode() == Instruction::Sub)
2352 std::swap(X, Y);
2353
2354 // Both operands have narrow versions. Last step: the math must not overflow
2355 // in the narrow width.
2356 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2357 return nullptr;
2358
2359 // bo (ext X), (ext Y) --> ext (bo X, Y)
2360 // bo (ext X), C --> ext (bo X, C')
2361 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2362 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2363 if (IsSext)
2364 NewBinOp->setHasNoSignedWrap();
2365 else
2366 NewBinOp->setHasNoUnsignedWrap();
2367 }
2368 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2369}
2370
2371/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2372/// transform.
2373 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2374                                               GEPOperator &GEP2) {
2375   return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2376 }
2377
2378/// Thread a GEP operation with constant indices through the constant true/false
2379/// arms of a select.
2380 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2381                                   InstCombiner::BuilderTy &Builder) {
2382 if (!GEP.hasAllConstantIndices())
2383 return nullptr;
2384
2385 Instruction *Sel;
2386 Value *Cond;
2387 Constant *TrueC, *FalseC;
2388 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2389 !match(Sel,
2390 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2391 return nullptr;
2392
2393 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2394 // Propagate 'inbounds' and metadata from existing instructions.
2395 // Note: using IRBuilder to create the constants for efficiency.
2396 SmallVector<Value *, 4> IndexC(GEP.indices());
2397 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2398 Type *Ty = GEP.getSourceElementType();
2399 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2400 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2401 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2402}
2403
2404// Canonicalization:
2405// gep T, (gep i8, base, C1), (Index + C2) into
2406// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
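// For example (illustrative), with T = i32, C1 = 4 and C2 = 3:
//   gep i32, (gep i8, p, 4), (add %i, 3)  ->  gep i32, (gep i8, p, 16), %i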
2407 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2408                                                 GEPOperator *Src,
2409 InstCombinerImpl &IC) {
2410 if (GEP.getNumIndices() != 1)
2411 return nullptr;
2412 auto &DL = IC.getDataLayout();
2413 Value *Base;
2414 const APInt *C1;
2415 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2416 return nullptr;
2417 Value *VarIndex;
2418 const APInt *C2;
2419 Type *PtrTy = Src->getType()->getScalarType();
2420 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2421 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2422 return nullptr;
2423 if (C1->getBitWidth() != IndexSizeInBits ||
2424 C2->getBitWidth() != IndexSizeInBits)
2425 return nullptr;
2426 Type *BaseType = GEP.getSourceElementType();
2427 if (isa<ScalableVectorType>(BaseType))
2428 return nullptr;
2429 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2430 APInt NewOffset = TypeSize * *C2 + *C1;
2431 if (NewOffset.isZero() ||
2432 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2433 Value *GEPConst =
2434 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
2435 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
2436 }
2437
2438 return nullptr;
2439}
2440
2441 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2442                                              GEPOperator *Src) {
2443 // Combine Indices - If the source pointer to this getelementptr instruction
2444 // is a getelementptr instruction with matching element type, combine the
2445 // indices of the two getelementptr instructions into a single instruction.
2446 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2447 return nullptr;
2448
2449 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2450 return I;
2451
2452 // For constant GEPs, use a more general offset-based folding approach.
2453 Type *PtrTy = Src->getType()->getScalarType();
2454 if (GEP.hasAllConstantIndices() &&
2455 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2456 // Split Src into a variable part and a constant suffix.
2457     gep_type_iterator GTI = gep_type_begin(*Src);
2458     Type *BaseType = GTI.getIndexedType();
2459 bool IsFirstType = true;
2460 unsigned NumVarIndices = 0;
2461 for (auto Pair : enumerate(Src->indices())) {
2462 if (!isa<ConstantInt>(Pair.value())) {
2463 BaseType = GTI.getIndexedType();
2464 IsFirstType = false;
2465 NumVarIndices = Pair.index() + 1;
2466 }
2467 ++GTI;
2468 }
2469
2470 // Determine the offset for the constant suffix of Src.
2471     APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
2472     if (NumVarIndices != Src->getNumIndices()) {
2473       // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2474 if (BaseType->isScalableTy())
2475 return nullptr;
2476
2477 SmallVector<Value *> ConstantIndices;
2478 if (!IsFirstType)
2479         ConstantIndices.push_back(
2480             Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
2481 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2482 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2483 }
2484
2485 // Add the offset for GEP (which is fully constant).
2486 if (!GEP.accumulateConstantOffset(DL, Offset))
2487 return nullptr;
2488
2489 // Convert the total offset back into indices.
2490     SmallVector<APInt> ConstIndices =
2491         DL.getGEPIndicesForOffset(BaseType, Offset);
2492 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2493 return nullptr;
2494
2495 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2496 SmallVector<Value *> Indices;
2497 append_range(Indices, drop_end(Src->indices(),
2498 Src->getNumIndices() - NumVarIndices));
2499 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2500 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2501 // Even if the total offset is inbounds, we may end up representing it
2502 // by first performing a larger negative offset, and then a smaller
2503 // positive one. The large negative offset might go out of bounds. Only
2504 // preserve inbounds if all signs are the same.
2505 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2507 if (!Idx.isNonNegative())
2508 NW = NW.withoutNoUnsignedWrap();
2509 }
2510
2511 return replaceInstUsesWith(
2512 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2513 Indices, "", NW));
2514 }
2515
2516 if (Src->getResultElementType() != GEP.getSourceElementType())
2517 return nullptr;
2518
2519 SmallVector<Value*, 8> Indices;
2520
2521 // Find out whether the last index in the source GEP is a sequential idx.
2522 bool EndsWithSequential = false;
2523 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2524 I != E; ++I)
2525 EndsWithSequential = I.isSequential();
2526
2527 // Can we combine the two pointer arithmetics offsets?
2528 if (EndsWithSequential) {
2529 // Replace: gep (gep %P, long B), long A, ...
2530 // With: T = long A+B; gep %P, T, ...
2531 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2532 Value *GO1 = GEP.getOperand(1);
2533
2534 // If they aren't the same type, then the input hasn't been processed
2535 // by the loop above yet (which canonicalizes sequential index types to
2536 // intptr_t). Just avoid transforming this until the input has been
2537 // normalized.
2538 if (SO1->getType() != GO1->getType())
2539 return nullptr;
2540
2541 Value *Sum =
2542 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2543 // Only do the combine when we are sure the cost after the
2544 // merge is never more than that before the merge.
2545 if (Sum == nullptr)
2546 return nullptr;
2547
2548 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2549 Indices.push_back(Sum);
2550 Indices.append(GEP.op_begin()+2, GEP.op_end());
2551 } else if (isa<Constant>(*GEP.idx_begin()) &&
2552 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2553 Src->getNumOperands() != 1) {
2554 // Otherwise we can do the fold if the first index of the GEP is a zero
2555 Indices.append(Src->op_begin()+1, Src->op_end());
2556 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2557 }
2558
2559 if (!Indices.empty())
2560 return replaceInstUsesWith(
2561         GEP, Builder.CreateGEP(
2562                  Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2563 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2564
2565 return nullptr;
2566}
2567
2569 BuilderTy *Builder,
2570 bool &DoesConsume, unsigned Depth) {
2571 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2572 // ~(~(X)) -> X.
2573 Value *A, *B;
2574 if (match(V, m_Not(m_Value(A)))) {
2575 DoesConsume = true;
2576 return A;
2577 }
2578
2579 Constant *C;
2580 // Constants can be considered to be not'ed values.
2581 if (match(V, m_ImmConstant(C)))
2582 return ConstantExpr::getNot(C);
2583
2583
2584   if (Depth++ >= MaxAnalysisRecursionDepth)
2585     return nullptr;
2586
2587 // The rest of the cases require that we invert all uses so don't bother
2588 // doing the analysis if we know we can't use the result.
2589 if (!WillInvertAllUses)
2590 return nullptr;
2591
2592 // Compares can be inverted if all of their uses are being modified to use
2593 // the ~V.
2594 if (auto *I = dyn_cast<CmpInst>(V)) {
2595 if (Builder != nullptr)
2596 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2597 I->getOperand(1));
2598 return NonNull;
2599 }
2600
2601 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2602 // `(-1 - B) - A` if we are willing to invert all of the uses.
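  // (Illustrative identity: ~(A + B) == -1 - (A + B) == (-1 - B) - A == ~B - A.)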
2603 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2604 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2605 DoesConsume, Depth))
2606 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2607 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2608 DoesConsume, Depth))
2609 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2610 return nullptr;
2611 }
2612
2613 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2614 // into `A ^ B` if we are willing to invert all of the uses.
2615 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2616 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2617 DoesConsume, Depth))
2618 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2619 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2620 DoesConsume, Depth))
2621 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2622 return nullptr;
2623 }
2624
2625 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2626 // `A + (-1 - B)` if we are willing to invert all of the uses.
2627 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2628 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2629 DoesConsume, Depth))
2630 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2631 return nullptr;
2632 }
2633
2634 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2635 // into `A s>> B` if we are willing to invert all of the uses.
2636 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2637 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2638 DoesConsume, Depth))
2639 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2640 return nullptr;
2641 }
2642
2643 Value *Cond;
2644 // LogicOps are special in that we canonicalize them at the cost of an
2645 // instruction.
2646 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2647 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2648 // Selects/min/max with invertible operands are freely invertible
2649 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2650 bool LocalDoesConsume = DoesConsume;
2651 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2652 LocalDoesConsume, Depth))
2653 return nullptr;
2654 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2655 LocalDoesConsume, Depth)) {
2656 DoesConsume = LocalDoesConsume;
2657 if (Builder != nullptr) {
2658 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2659 DoesConsume, Depth);
2660 assert(NotB != nullptr &&
2661                "Unable to build inverted value for known freely invertible op");
2662 if (auto *II = dyn_cast<IntrinsicInst>(V))
2663           return Builder->CreateBinaryIntrinsic(
2664               getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2665 return Builder->CreateSelect(Cond, NotA, NotB);
2666 }
2667 return NonNull;
2668 }
2669 }
2670
2671 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2672 bool LocalDoesConsume = DoesConsume;
2673     SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
2674     for (Use &U : PN->operands()) {
2675 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2676 Value *NewIncomingVal = getFreelyInvertedImpl(
2677 U.get(), /*WillInvertAllUses=*/false,
2678 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2679 if (NewIncomingVal == nullptr)
2680 return nullptr;
2681 // Make sure that we can safely erase the original PHI node.
2682 if (NewIncomingVal == V)
2683 return nullptr;
2684 if (Builder != nullptr)
2685 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2686 }
2687
2688 DoesConsume = LocalDoesConsume;
2689 if (Builder != nullptr) {
2692 PHINode *NewPN =
2693 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2694 for (auto [Val, Pred] : IncomingValues)
2695 NewPN->addIncoming(Val, Pred);
2696 return NewPN;
2697 }
2698 return NonNull;
2699 }
2700
2701 if (match(V, m_SExtLike(m_Value(A)))) {
2702 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2703 DoesConsume, Depth))
2704 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2705 return nullptr;
2706 }
2707
2708 if (match(V, m_Trunc(m_Value(A)))) {
2709 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2710 DoesConsume, Depth))
2711 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2712 return nullptr;
2713 }
2714
2715 // De Morgan's Laws:
2716 // (~(A | B)) -> (~A & ~B)
2717 // (~(A & B)) -> (~A | ~B)
2718 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2719 bool IsLogical, Value *A,
2720 Value *B) -> Value * {
2721 bool LocalDoesConsume = DoesConsume;
2722 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2723 LocalDoesConsume, Depth))
2724 return nullptr;
2725 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2726 LocalDoesConsume, Depth)) {
2727 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2728 LocalDoesConsume, Depth);
2729 DoesConsume = LocalDoesConsume;
2730 if (IsLogical)
2731 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2732 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2733 }
2734
2735 return nullptr;
2736 };
2737
2738 if (match(V, m_Or(m_Value(A), m_Value(B))))
2739 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2740 B);
2741
2742 if (match(V, m_And(m_Value(A), m_Value(B))))
2743 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2744 B);
2745
2746 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2747 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2748 B);
2749
2750 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2751 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2752 B);
2753
2754 return nullptr;
2755}
2756
2757/// Return true if we should canonicalize the gep to an i8 ptradd.
2758 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
2759   Value *PtrOp = GEP.getOperand(0);
2760 Type *GEPEltType = GEP.getSourceElementType();
2761 if (GEPEltType->isIntegerTy(8))
2762 return false;
2763
2764 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
2765 // intrinsic. This has better support in BasicAA.
2766 if (GEPEltType->isScalableTy())
2767 return true;
2768
2769 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
2770 // together.
2771 if (GEP.getNumIndices() == 1 &&
2772       match(GEP.getOperand(1),
2773             m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
2774 m_Shl(m_Value(), m_ConstantInt())))))
2775 return true;
2776
2777 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
2778 // possibly be merged together.
2779 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
2780 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
2781 any_of(GEP.indices(), [](Value *V) {
2782 const APInt *C;
2783 return match(V, m_APInt(C)) && !C->isZero();
2784 });
2785}
2786
2788 IRBuilderBase &Builder) {
2789 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2790 if (!Op1)
2791 return nullptr;
2792
2793 // Don't fold a GEP into itself through a PHI node. This can only happen
2794 // through the back-edge of a loop. Folding a GEP into itself means that
2795 // the value of the previous iteration needs to be stored in the meantime,
2796 // thus requiring an additional register variable to be live, but not
2797 // actually achieving anything (the GEP still needs to be executed once per
2798 // loop iteration).
2799 if (Op1 == &GEP)
2800 return nullptr;
2801 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
2802
2803 int DI = -1;
2804
2805 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2806 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2807 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2808 Op1->getSourceElementType() != Op2->getSourceElementType())
2809 return nullptr;
2810
2811 // As for Op1 above, don't try to fold a GEP into itself.
2812 if (Op2 == &GEP)
2813 return nullptr;
2814
2815 // Keep track of the type as we walk the GEP.
2816 Type *CurTy = nullptr;
2817
2818 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2819 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2820 return nullptr;
2821
2822 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2823 if (DI == -1) {
2824 // We have not seen any differences yet in the GEPs feeding the
2825 // PHI yet, so we record this one if it is allowed to be a
2826 // variable.
2827
2828 // The first two arguments can vary for any GEP, the rest have to be
2829 // static for struct slots
2830 if (J > 1) {
2831 assert(CurTy && "No current type?");
2832 if (CurTy->isStructTy())
2833 return nullptr;
2834 }
2835
2836 DI = J;
2837 } else {
2838 // The GEP is different by more than one input. While this could be
2839 // extended to support GEPs that vary by more than one variable it
2840 // doesn't make sense since it greatly increases the complexity and
2841 // would result in an R+R+R addressing mode which no backend
2842 // directly supports and would need to be broken into several
2843 // simpler instructions anyway.
2844 return nullptr;
2845 }
2846 }
2847
2848 // Sink down a layer of the type for the next iteration.
2849 if (J > 0) {
2850 if (J == 1) {
2851 CurTy = Op1->getSourceElementType();
2852 } else {
2853 CurTy =
2854 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2855 }
2856 }
2857 }
2858
2859 NW &= Op2->getNoWrapFlags();
2860 }
2861
2862 // If not all GEPs are identical we'll have to create a new PHI node.
2863 // Check that the old PHI node has only one use so that it will get
2864 // removed.
2865 if (DI != -1 && !PN->hasOneUse())
2866 return nullptr;
2867
2868 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2869 NewGEP->setNoWrapFlags(NW);
2870
2871 if (DI == -1) {
2872 // All the GEPs feeding the PHI are identical. Clone one down into our
2873 // BB so that it can be merged with the current GEP.
2874 } else {
2875 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2876 // into the current block so it can be merged, and create a new PHI to
2877 // set that index.
2878 PHINode *NewPN;
2879 {
2880 IRBuilderBase::InsertPointGuard Guard(Builder);
2881 Builder.SetInsertPoint(PN);
2882 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2883 PN->getNumOperands());
2884 }
2885
2886 for (auto &I : PN->operands())
2887 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2888 PN->getIncomingBlock(I));
2889
2890 NewGEP->setOperand(DI, NewPN);
2891 }
2892
2893 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
2894 return NewGEP;
2895}
2896
2897 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2898   Value *PtrOp = GEP.getOperand(0);
2899 SmallVector<Value *, 8> Indices(GEP.indices());
2900 Type *GEPType = GEP.getType();
2901 Type *GEPEltType = GEP.getSourceElementType();
2902 if (Value *V =
2903 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
2905 return replaceInstUsesWith(GEP, V);
2906
2907 // For vector geps, use the generic demanded vector support.
2908 // Skip if GEP return type is scalable. The number of elements is unknown at
2909 // compile-time.
2910 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2911 auto VWidth = GEPFVTy->getNumElements();
2912 APInt PoisonElts(VWidth, 0);
2913 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2914 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2915 PoisonElts)) {
2916 if (V != &GEP)
2917 return replaceInstUsesWith(GEP, V);
2918 return &GEP;
2919 }
2920
2921 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2922 // possible (decide on canonical form for pointer broadcast), 3) exploit
2923 // undef elements to decrease demanded bits
2924 }
2925
2926 // Eliminate unneeded casts for indices, and replace indices which displace
2927 // by multiples of a zero size type with zero.
2928 bool MadeChange = false;
2929
2930 // Index width may not be the same width as pointer width.
2931 // Data layout chooses the right type based on supported integer types.
2932 Type *NewScalarIndexTy =
2933 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2934
2935   gep_type_iterator GTI = gep_type_begin(GEP);
2936   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2937 ++I, ++GTI) {
2938 // Skip indices into struct types.
2939 if (GTI.isStruct())
2940 continue;
2941
2942 Type *IndexTy = (*I)->getType();
2943 Type *NewIndexType =
2944 IndexTy->isVectorTy()
2945 ? VectorType::get(NewScalarIndexTy,
2946 cast<VectorType>(IndexTy)->getElementCount())
2947 : NewScalarIndexTy;
2948
2949 // If the element type has zero size then any index over it is equivalent
2950 // to an index of zero, so replace it with zero if it is not zero already.
2951 Type *EltTy = GTI.getIndexedType();
2952 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2953 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2954 *I = Constant::getNullValue(NewIndexType);
2955 MadeChange = true;
2956 }
2957
2958 if (IndexTy != NewIndexType) {
2959 // If we are using a wider index than needed for this platform, shrink
2960 // it to what we need. If narrower, sign-extend it to what we need.
2961 // This explicit cast can make subsequent optimizations more obvious.
2962 *I = Builder.CreateIntCast(*I, NewIndexType, true);
2963 MadeChange = true;
2964 }
2965 }
2966 if (MadeChange)
2967 return &GEP;
2968
2969 // Canonicalize constant GEPs to i8 type.
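  // For example (illustrative): 'getelementptr inbounds i32, ptr %p, i64 3'
  // becomes 'getelementptr inbounds i8, ptr %p, i64 12' once the constant
  // offset has been accumulated.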
2970 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
2971     APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
2972     if (GEP.accumulateConstantOffset(DL, Offset))
2973       return replaceInstUsesWith(
2974           GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
2975                                     GEP.getNoWrapFlags()));
2976 }
2977
2978   if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
2979     Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
2980 Value *NewGEP =
2981 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
2982 return replaceInstUsesWith(GEP, NewGEP);
2983 }
2984
2985 // Check to see if the inputs to the PHI node are getelementptr instructions.
2986 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2987 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
2988 return replaceOperand(GEP, 0, NewPtrOp);
2989 }
2990
2991 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2992 if (Instruction *I = visitGEPOfGEP(GEP, Src))
2993 return I;
2994
2995 if (GEP.getNumIndices() == 1) {
2996 unsigned AS = GEP.getPointerAddressSpace();
2997 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2998 DL.getIndexSizeInBits(AS)) {
2999 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3000
3001 if (TyAllocSize == 1) {
3002 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3003 // but only if the result pointer is only used as if it were an integer,
3004 // or both point to the same underlying object (otherwise provenance is
3005 // not necessarily retained).
3006 Value *X = GEP.getPointerOperand();
3007 Value *Y;
3008         if (match(GEP.getOperand(1),
3009                   m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3010 GEPType == Y->getType()) {
3011           bool HasSameUnderlyingObject =
3012               getUnderlyingObject(X) == getUnderlyingObject(Y);
3013 bool Changed = false;
3014 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3015 bool ShouldReplace = HasSameUnderlyingObject ||
3016 isa<ICmpInst>(U.getUser()) ||
3017 isa<PtrToIntInst>(U.getUser());
3018 Changed |= ShouldReplace;
3019 return ShouldReplace;
3020 });
3021 return Changed ? &GEP : nullptr;
3022 }
3023 } else if (auto *ExactIns =
3024 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3025 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3026 Value *V;
3027 if (ExactIns->isExact()) {
3028 if ((has_single_bit(TyAllocSize) &&
3029 match(GEP.getOperand(1),
3030 m_Shr(m_Value(V),
3031 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3032 match(GEP.getOperand(1),
3033 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3034             return GetElementPtrInst::Create(Builder.getInt8Ty(),
3035                                              GEP.getPointerOperand(), V,
3036 GEP.getNoWrapFlags());
3037 }
3038 }
3039 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3040 // Try to canonicalize non-i8 element type to i8 if the index is an
3041 // exact instruction. If the index is an exact instruction (div/shr)
3042 // with a constant RHS, we can fold the non-i8 element scale into the
3043           // div/shr (similar to the mul case, just inverted).
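          // For example (illustrative), with a 4-byte element type:
          //   gep i32, p, (lshr exact %v, 3)  ->  gep i8, p, (lshr exact %v, 1)
          //   gep i32, p, (sdiv exact %v, 12) ->  gep i8, p, (sdiv exact %v, 3)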
3044 const APInt *C;
3045 std::optional<APInt> NewC;
3046 if (has_single_bit(TyAllocSize) &&
3047 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3048 C->uge(countr_zero(TyAllocSize)))
3049 NewC = *C - countr_zero(TyAllocSize);
3050 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3051 APInt Quot;
3052 uint64_t Rem;
3053 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3054 if (Rem == 0)
3055 NewC = Quot;
3056 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3057 APInt Quot;
3058 int64_t Rem;
3059 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3060             // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3061 if (!Quot.isAllOnes() && Rem == 0)
3062 NewC = Quot;
3063 }
3064
3065 if (NewC.has_value()) {
3066 Value *NewOp = Builder.CreateBinOp(
3067 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3068 ConstantInt::get(V->getType(), *NewC));
3069 cast<BinaryOperator>(NewOp)->setIsExact();
3070             return GetElementPtrInst::Create(Builder.getInt8Ty(),
3071                                              GEP.getPointerOperand(), NewOp,
3072 GEP.getNoWrapFlags());
3073 }
3074 }
3075 }
3076 }
3077 }
3078 // We do not handle pointer-vector geps here.
3079 if (GEPType->isVectorTy())
3080 return nullptr;
3081
3082 if (GEP.getNumIndices() == 1) {
3083 // We can only preserve inbounds if the original gep is inbounds, the add
3084 // is nsw, and the add operands are non-negative.
3085 auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {
3086       SimplifyQuery Q = SQ.getWithInstruction(&GEP);
3087       return GEP.isInBounds() && AddIsNSW && isKnownNonNegative(Idx1, Q) &&
3088 isKnownNonNegative(Idx2, Q);
3089 };
3090
3091 // Try to replace ADD + GEP with GEP + GEP.
3092 Value *Idx1, *Idx2;
3093 if (match(GEP.getOperand(1),
3094 m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
3095 // %idx = add i64 %idx1, %idx2
3096 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3097 // as:
3098 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3099 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3100 bool IsInBounds = CanPreserveInBounds(
3101 cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),
3102 Idx1, Idx2);
3103 auto *NewPtr =
3104 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3105 Idx1, "", IsInBounds);
3106 return replaceInstUsesWith(
3107 GEP, Builder.CreateGEP(GEP.getSourceElementType(), NewPtr, Idx2, "",
3108 IsInBounds));
3109 }
3110 ConstantInt *C;
3111 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
3112 m_Value(Idx1), m_ConstantInt(C))))))) {
3113 // %add = add nsw i32 %idx1, idx2
3114 // %sidx = sext i32 %add to i64
3115 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3116 // as:
3117 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3118 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3119 bool IsInBounds = CanPreserveInBounds(
3120 /*IsNSW=*/true, Idx1, C);
3121 auto *NewPtr = Builder.CreateGEP(
3122 GEP.getSourceElementType(), GEP.getPointerOperand(),
3123 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "",
3124 IsInBounds);
3125 return replaceInstUsesWith(
3126 GEP,
3127 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3128 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3129 "", IsInBounds));
3130 }
3131 }
3132
3133 if (!GEP.isInBounds()) {
3134 unsigned IdxWidth =
3135         DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3136     APInt BasePtrOffset(IdxWidth, 0);
3137     Value *UnderlyingPtrOp =
3138         PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
3139                                                          BasePtrOffset);
3140 bool CanBeNull, CanBeFreed;
3141 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3142 DL, CanBeNull, CanBeFreed);
3143 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3144 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3145 BasePtrOffset.isNonNegative()) {
3146 APInt AllocSize(IdxWidth, DerefBytes);
3147 if (BasePtrOffset.ule(AllocSize)) {
3148           return GetElementPtrInst::CreateInBounds(
3149               GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3150 }
3151 }
3152 }
3153 }
3154
3155 // nusw + nneg -> nuw
3156 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3157 all_of(GEP.indices(), [&](Value *Idx) {
3158 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3159 })) {
3160 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3161 return &GEP;
3162 }
3163
3164   if (Instruction *R = foldSelectGEP(GEP, Builder))
3165     return R;
3166
3167 return nullptr;
3168}
3169
3170 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3171                                          Instruction *AI) {
3172 if (isa<ConstantPointerNull>(V))
3173 return true;
3174 if (auto *LI = dyn_cast<LoadInst>(V))
3175 return isa<GlobalVariable>(LI->getPointerOperand());
3176 // Two distinct allocations will never be equal.
3177 return isAllocLikeFn(V, &TLI) && V != AI;
3178}
3179
3180/// Given a call CB which uses an address UsedV, return true if we can prove the
 3181/// call's only possible effect is storing to UsedV.
3182static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3183 const TargetLibraryInfo &TLI) {
3184 if (!CB.use_empty())
3185 // TODO: add recursion if returned attribute is present
3186 return false;
3187
3188 if (CB.isTerminator())
3189 // TODO: remove implementation restriction
3190 return false;
3191
3192 if (!CB.willReturn() || !CB.doesNotThrow())
3193 return false;
3194
3195 // If the only possible side effect of the call is writing to the alloca,
3196 // and the result isn't used, we can safely remove any reads implied by the
3197 // call including those which might read the alloca itself.
3198 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3199 return Dest && Dest->Ptr == UsedV;
3200}
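// Illustrative sketch (not part of the original source): a call that
// isRemovableWrite would typically accept, assuming %buf is the address
// being analyzed and the call result is unused:
//
//   %buf = alloca [16 x i8]
//   call void @llvm.memset.p0.i64(ptr %buf, i8 0, i64 16, i1 false)
//
// The memset does not throw, always returns, and its only write target is
// %buf, so MemoryLocation::getForDest reports %buf as the sole destination
// and the function returns true.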
3201
 3202static bool isAllocSiteRemovable(Instruction *AI,
 3203                                 SmallVectorImpl<WeakTrackingVH> &Users,
 3204                                 const TargetLibraryInfo &TLI) {
 3205  SmallVector<Instruction *, 4> Worklist;
 3206  const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3207 Worklist.push_back(AI);
3208
3209 do {
3210 Instruction *PI = Worklist.pop_back_val();
3211 for (User *U : PI->users()) {
3212 Instruction *I = cast<Instruction>(U);
3213 switch (I->getOpcode()) {
3214 default:
3215 // Give up the moment we see something we can't handle.
3216 return false;
3217
3218 case Instruction::AddrSpaceCast:
3219 case Instruction::BitCast:
3220 case Instruction::GetElementPtr:
3221 Users.emplace_back(I);
3222 Worklist.push_back(I);
3223 continue;
3224
3225 case Instruction::ICmp: {
3226 ICmpInst *ICI = cast<ICmpInst>(I);
3227 // We can fold eq/ne comparisons with null to false/true, respectively.
3228 // We also fold comparisons in some conditions provided the alloc has
3229 // not escaped (see isNeverEqualToUnescapedAlloc).
3230 if (!ICI->isEquality())
3231 return false;
3232 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3233 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3234 return false;
3235
3236 // Do not fold compares to aligned_alloc calls, as they may have to
3237 // return null in case the required alignment cannot be satisfied,
3238 // unless we can prove that both alignment and size are valid.
3239 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3240 // Check if alignment and size of a call to aligned_alloc is valid,
3241 // that is alignment is a power-of-2 and the size is a multiple of the
3242 // alignment.
3243 const APInt *Alignment;
3244 const APInt *Size;
3245 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3246 match(CB->getArgOperand(1), m_APInt(Size)) &&
3247 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3248 };
3249 auto *CB = dyn_cast<CallBase>(AI);
3250 LibFunc TheLibFunc;
3251 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3252 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3253 !AlignmentAndSizeKnownValid(CB))
3254 return false;
3255 Users.emplace_back(I);
3256 continue;
3257 }
3258
3259 case Instruction::Call:
3260 // Ignore no-op and store intrinsics.
3261 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3262 switch (II->getIntrinsicID()) {
3263 default:
3264 return false;
3265
3266 case Intrinsic::memmove:
3267 case Intrinsic::memcpy:
3268 case Intrinsic::memset: {
3269 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3270 if (MI->isVolatile() || MI->getRawDest() != PI)
3271 return false;
3272 [[fallthrough]];
3273 }
3274 case Intrinsic::assume:
3275 case Intrinsic::invariant_start:
3276 case Intrinsic::invariant_end:
3277 case Intrinsic::lifetime_start:
3278 case Intrinsic::lifetime_end:
3279 case Intrinsic::objectsize:
3280 Users.emplace_back(I);
3281 continue;
3282 case Intrinsic::launder_invariant_group:
3283 case Intrinsic::strip_invariant_group:
3284 Users.emplace_back(I);
3285 Worklist.push_back(I);
3286 continue;
3287 }
3288 }
3289
3290 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3291 Users.emplace_back(I);
3292 continue;
3293 }
3294
3295 if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3296 getAllocationFamily(I, &TLI) == Family) {
3297 assert(Family);
3298 Users.emplace_back(I);
3299 continue;
3300 }
3301
3302 if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
3303 getAllocationFamily(I, &TLI) == Family) {
3304 assert(Family);
3305 Users.emplace_back(I);
3306 Worklist.push_back(I);
3307 continue;
3308 }
3309
3310 return false;
3311
3312 case Instruction::Store: {
3313 StoreInst *SI = cast<StoreInst>(I);
3314 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3315 return false;
3316 Users.emplace_back(I);
3317 continue;
3318 }
3319 }
3320 llvm_unreachable("missing a return?");
3321 }
3322 } while (!Worklist.empty());
3323 return true;
3324}
3325
 3326Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
 3327  assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
3328
 3329  // If we have a malloc call which is only used in any number of comparisons to
3330 // null and free calls, delete the calls and replace the comparisons with true
3331 // or false as appropriate.
3332
3333 // This is based on the principle that we can substitute our own allocation
3334 // function (which will never return null) rather than knowledge of the
3335 // specific function being called. In some sense this can change the permitted
3336 // outputs of a program (when we convert a malloc to an alloca, the fact that
3337 // the allocation is now on the stack is potentially visible, for example),
 3338  // but we believe this is done in a permissible manner.
3340
3341 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3342 // before each store.
3345 std::unique_ptr<DIBuilder> DIB;
3346 if (isa<AllocaInst>(MI)) {
3347 findDbgUsers(DVIs, &MI, &DVRs);
3348 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3349 }
3350
3351 if (isAllocSiteRemovable(&MI, Users, TLI)) {
3352 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
 3353      // Lower all @llvm.objectsize calls first because they may
3354 // use a bitcast/GEP of the alloca we are removing.
3355 if (!Users[i])
3356 continue;
3357
3358 Instruction *I = cast<Instruction>(&*Users[i]);
3359
3360 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3361 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3362 SmallVector<Instruction *> InsertedInstructions;
3363 Value *Result = lowerObjectSizeCall(
3364 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3365 for (Instruction *Inserted : InsertedInstructions)
3366 Worklist.add(Inserted);
3367 replaceInstUsesWith(*I, Result);
 3368          eraseInstFromFunction(*I);
 3369          Users[i] = nullptr; // Skip examining in the next loop.
3370 }
3371 }
3372 }
3373 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3374 if (!Users[i])
3375 continue;
3376
3377 Instruction *I = cast<Instruction>(&*Users[i]);
3378
3379 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
 3380        replaceInstUsesWith(*C,
 3381                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
3382 C->isFalseWhenEqual()));
3383 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3384 for (auto *DVI : DVIs)
3385 if (DVI->isAddressOfVariable())
3386 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
3387 for (auto *DVR : DVRs)
3388 if (DVR->isAddressOfVariable())
3389 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3390 } else {
3391 // Casts, GEP, or anything else: we're about to delete this instruction,
3392 // so it can not have any valid uses.
3393 replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
3394 }
 3395      eraseInstFromFunction(*I);
 3396    }
3397
3398 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3399 // Replace invoke with a NOP intrinsic to maintain the original CFG
3400 Module *M = II->getModule();
3401 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3402 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "",
3403 II->getParent());
3404 }
3405
3406 // Remove debug intrinsics which describe the value contained within the
3407 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3408 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3409 //
3410 // ```
3411 // define void @foo(i32 %0) {
3412 // %a = alloca i32 ; Deleted.
3413 // store i32 %0, i32* %a
3414 // dbg.value(i32 %0, "arg0") ; Not deleted.
3415 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3416 // call void @trivially_inlinable_no_op(i32* %a)
3417 // ret void
3418 // }
3419 // ```
3420 //
3421 // This may not be required if we stop describing the contents of allocas
3422 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3423 // the LowerDbgDeclare utility.
3424 //
3425 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3426 // "arg0" dbg.value may be stale after the call. However, failing to remove
3427 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3428 //
3429 // FIXME: the Assignment Tracking project has now likely made this
3430 // redundant (and it's sometimes harmful).
3431 for (auto *DVI : DVIs)
3432 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3433 DVI->eraseFromParent();
3434 for (auto *DVR : DVRs)
3435 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3436 DVR->eraseFromParent();
3437
3438 return eraseInstFromFunction(MI);
3439 }
3440 return nullptr;
3441}
3442
3443/// Move the call to free before a NULL test.
3444///
 3445/// Check if this free is accessed after its argument has been tested
3446/// against NULL (property 0).
3447/// If yes, it is legal to move this call in its predecessor block.
3448///
3449/// The move is performed only if the block containing the call to free
3450/// will be removed, i.e.:
3451/// 1. it has only one predecessor P, and P has two successors
3452/// 2. it contains the call, noops, and an unconditional branch
3453/// 3. its successor is the same as its predecessor's successor
3454///
 3455/// Profitability is not a concern here; this function should
3456/// be called only if the caller knows this transformation would be
3457/// profitable (e.g., for code size).
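///
/// A typical shape this targets (illustrative sketch, not from the source):
///
///   entry:
///     %is_null = icmp eq ptr %p, null
///     br i1 %is_null, label %cont, label %do_free
///   do_free:                        ; single predecessor, only free + br
///     call void @free(ptr %p)
///     br label %cont
///   cont:
///     ...
///
/// After hoisting the call into 'entry', 'do_free' becomes empty and can be
/// removed by SimplifyCFG.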
 3458static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
 3459                                                const DataLayout &DL) {
3460 Value *Op = FI.getArgOperand(0);
3461 BasicBlock *FreeInstrBB = FI.getParent();
3462 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3463
3464 // Validate part of constraint #1: Only one predecessor
 3465  // FIXME: We can extend the number of predecessors, but in that case, we
3466 // would duplicate the call to free in each predecessor and it may
3467 // not be profitable even for code size.
3468 if (!PredBB)
3469 return nullptr;
3470
 3471  // Validate constraint #2: Does this block contain only the call to
3472 // free, noops, and an unconditional branch?
3473 BasicBlock *SuccBB;
3474 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3475 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3476 return nullptr;
3477
3478 // If there are only 2 instructions in the block, at this point,
 3479  // these are the call to free and the unconditional branch.
3480 // If there are more than 2 instructions, check that they are noops
3481 // i.e., they won't hurt the performance of the generated code.
3482 if (FreeInstrBB->size() != 2) {
3483 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3484 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3485 continue;
3486 auto *Cast = dyn_cast<CastInst>(&Inst);
3487 if (!Cast || !Cast->isNoopCast(DL))
3488 return nullptr;
3489 }
3490 }
3491 // Validate the rest of constraint #1 by matching on the pred branch.
3492 Instruction *TI = PredBB->getTerminator();
3493 BasicBlock *TrueBB, *FalseBB;
3494 CmpPredicate Pred;
3495 if (!match(TI, m_Br(m_ICmp(Pred,
 3496                             m_CombineOr(m_Specific(Op),
 3497                                         m_Specific(Op->stripPointerCasts())),
3498 m_Zero()),
3499 TrueBB, FalseBB)))
3500 return nullptr;
3501 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3502 return nullptr;
3503
3504 // Validate constraint #3: Ensure the null case just falls through.
3505 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3506 return nullptr;
3507 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3508 "Broken CFG: missing edge from predecessor to successor");
3509
3510 // At this point, we know that everything in FreeInstrBB can be moved
3511 // before TI.
3512 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3513 if (&Instr == FreeInstrBBTerminator)
3514 break;
3515 Instr.moveBeforePreserving(TI);
3516 }
3517 assert(FreeInstrBB->size() == 1 &&
3518 "Only the branch instruction should remain");
3519
3520 // Now that we've moved the call to free before the NULL check, we have to
3521 // remove any attributes on its parameter that imply it's non-null, because
3522 // those attributes might have only been valid because of the NULL check, and
3523 // we can get miscompiles if we keep them. This is conservative if non-null is
3524 // also implied by something other than the NULL check, but it's guaranteed to
3525 // be correct, and the conservativeness won't matter in practice, since the
3526 // attributes are irrelevant for the call to free itself and the pointer
3527 // shouldn't be used after the call.
3528 AttributeList Attrs = FI.getAttributes();
3529 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3530 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3531 if (Dereferenceable.isValid()) {
3532 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3533 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3534 Attribute::Dereferenceable);
3535 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3536 }
3537 FI.setAttributes(Attrs);
3538
3539 return &FI;
3540}
3541
 3542Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
 3543  // free undef -> unreachable.
3544 if (isa<UndefValue>(Op)) {
3545 // Leave a marker since we can't modify the CFG here.
 3546    CreateNonTerminatorUnreachable(&FI);
 3547    return eraseInstFromFunction(FI);
3548 }
3549
 3550  // If we have 'free null', delete the instruction. This can happen in STL code
3551 // when lots of inlining happens.
3552 if (isa<ConstantPointerNull>(Op))
3553 return eraseInstFromFunction(FI);
3554
3555 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3556 // realloc() entirely.
3557 CallInst *CI = dyn_cast<CallInst>(Op);
3558 if (CI && CI->hasOneUse())
3559 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3560 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3561
3562 // If we optimize for code size, try to move the call to free before the null
 3563  // test so that SimplifyCFG can remove the empty block and dead code
 3564  // elimination can remove the branch. I.e., this helps to turn something like:
3565 // if (foo) free(foo);
3566 // into
3567 // free(foo);
3568 //
3569 // Note that we can only do this for 'free' and not for any flavor of
3570 // 'operator delete'; there is no 'operator delete' symbol for which we are
3571 // permitted to invent a call, even if we're passing in a null pointer.
3572 if (MinimizeSize) {
3573 LibFunc Func;
3574 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
 3575    if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
 3576      return I;
3577 }
3578
3579 return nullptr;
3580}
3581
 3582Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
 3583  Value *RetVal = RI.getReturnValue();
3584 if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
3585 return nullptr;
3586
3587 Function *F = RI.getFunction();
3588 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3589 if (ReturnClass == fcNone)
3590 return nullptr;
3591
3592 KnownFPClass KnownClass;
3593 Value *Simplified =
3594 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
3595 if (!Simplified)
3596 return nullptr;
3597
3598 return ReturnInst::Create(RI.getContext(), Simplified);
3599}
3600
3601// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
 3602bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
 3603  // Try to remove the previous instruction if it must lead to unreachable.
3604 // This includes instructions like stores and "llvm.assume" that may not get
3605 // removed by simple dead code elimination.
3606 bool Changed = false;
3607 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
 3608    // While we theoretically can erase an EH pad, that would result in a block
 3609    // that used to start with an EH pad no longer starting with one, which is invalid.
3610 // To make it valid, we'd need to fixup predecessors to no longer refer to
3611 // this block, but that changes CFG, which is not allowed in InstCombine.
3612 if (Prev->isEHPad())
3613 break; // Can not drop any more instructions. We're done here.
3614
 3615    if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
 3616      break; // Can not drop any more instructions. We're done here.
3617 // Otherwise, this instruction can be freely erased,
3618 // even if it is not side-effect free.
3619
3620 // A value may still have uses before we process it here (for example, in
3621 // another unreachable block), so convert those to poison.
3622 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3623 eraseInstFromFunction(*Prev);
3624 Changed = true;
3625 }
3626 return Changed;
3627}
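// Illustrative sketch (not from the original source): instructions that must
// lead to unreachable can be dropped even though they have side effects.
//
//   store i32 1, ptr %p
//   call void @llvm.assume(i1 %c)
//   unreachable
//
// Both the store and the assume are deleted here; an instruction that might
// not transfer execution to its successor (e.g. a call that may not return)
// stops the walk.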
3628
3631 return nullptr;
3632}
3633
 3634Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
 3635  assert(BI.isUnconditional() && "Only for unconditional branches.");
3636
3637 // If this store is the second-to-last instruction in the basic block
3638 // (excluding debug info and bitcasts of pointers) and if the block ends with
3639 // an unconditional branch, try to move the store to the successor block.
3640
3641 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3642 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3643 return BBI->isDebugOrPseudoInst() ||
3644 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3645 };
3646
3647 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3648 do {
3649 if (BBI != FirstInstr)
3650 --BBI;
3651 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3652
3653 return dyn_cast<StoreInst>(BBI);
3654 };
3655
3656 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3657 if (mergeStoreIntoSuccessor(*SI))
3658 return &BI;
3659
3660 return nullptr;
3661}
3662
 3663void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
 3664                                   SmallVectorImpl<BasicBlock *> &Worklist) {
 3665  if (!DeadEdges.insert({From, To}).second)
3666 return;
3667
3668 // Replace phi node operands in successor with poison.
3669 for (PHINode &PN : To->phis())
3670 for (Use &U : PN.incoming_values())
3671 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3672 replaceUse(U, PoisonValue::get(PN.getType()));
3673 addToWorklist(&PN);
3674 MadeIRChange = true;
3675 }
3676
3677 Worklist.push_back(To);
3678}
3679
3680// Under the assumption that I is unreachable, remove it and following
3681// instructions. Changes are reported directly to MadeIRChange.
 3682void InstCombinerImpl::handleUnreachableFrom(
 3683    Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
 3684  BasicBlock *BB = I->getParent();
3685 for (Instruction &Inst : make_early_inc_range(
3686 make_range(std::next(BB->getTerminator()->getReverseIterator()),
3687 std::next(I->getReverseIterator())))) {
3688 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3689 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3690 MadeIRChange = true;
3691 }
3692 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3693 continue;
3694 // RemoveDIs: erase debug-info on this instruction manually.
3695 Inst.dropDbgRecords();
 3696    Inst.eraseFromParent();
 3697    MadeIRChange = true;
3698 }
3699
3700 SmallVector<Value *> Changed;
3701 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
3702 MadeIRChange = true;
3703 for (Value *V : Changed)
3704 addToWorklist(cast<Instruction>(V));
3705 }
3706
3707 // Handle potentially dead successors.
3708 for (BasicBlock *Succ : successors(BB))
3709 addDeadEdge(BB, Succ, Worklist);
3710}
3711
 3712void InstCombinerImpl::handlePotentiallyDeadBlocks(
 3713    SmallVectorImpl<BasicBlock *> &Worklist) {
 3714  while (!Worklist.empty()) {
3715 BasicBlock *BB = Worklist.pop_back_val();
3716 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3717 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3718 }))
3719 continue;
3720
 3721    handleUnreachableFrom(&BB->front(), Worklist);
 3722  }
3723}
3724
 3725void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
 3726                                                       BasicBlock *LiveSucc) {
 3727  SmallVector<BasicBlock *> Worklist;
3728 for (BasicBlock *Succ : successors(BB)) {
3729 // The live successor isn't dead.
3730 if (Succ == LiveSucc)
3731 continue;
3732
3733 addDeadEdge(BB, Succ, Worklist);
3734 }
3735
 3736  handlePotentiallyDeadBlocks(Worklist);
 3737}
3738
 3739Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
 3740  if (BI.isUnconditional())
 3741    return visitUnconditionalBranchInst(BI);
 3742
3743 // Change br (not X), label True, label False to: br X, label False, True
3744 Value *Cond = BI.getCondition();
3745 Value *X;
3746 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3747 // Swap Destinations and condition...
3748 BI.swapSuccessors();
3749 if (BPI)
3751 return replaceOperand(BI, 0, X);
3752 }
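  // Illustrative sketch (not from the original source) of the swap above:
  //
  //   %not = xor i1 %x, true
  //   br i1 %not, label %true_bb, label %false_bb
  // -->
  //   br i1 %x, label %false_bb, label %true_bb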
3753
3754 // Canonicalize logical-and-with-invert as logical-or-with-invert.
3755 // This is done by inverting the condition and swapping successors:
3756 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3757 Value *Y;
3758 if (isa<SelectInst>(Cond) &&
3759 match(Cond,
3761 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3762 Value *Or = Builder.CreateLogicalOr(NotX, Y);
3763 BI.swapSuccessors();
3764 if (BPI)
3766 return replaceOperand(BI, 0, Or);
3767 }
3768
3769 // If the condition is irrelevant, remove the use so that other
3770 // transforms on the condition become more effective.
3771 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3772 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3773
3774 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3775 CmpPredicate Pred;
3776 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3777 !isCanonicalPredicate(Pred)) {
3778 // Swap destinations and condition.
3779 auto *Cmp = cast<CmpInst>(Cond);
3780 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3781 BI.swapSuccessors();
3782 if (BPI)
3784 Worklist.push(Cmp);
3785 return &BI;
3786 }
3787
3788 if (isa<UndefValue>(Cond)) {
3789 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3790 return nullptr;
3791 }
3792 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
 3793    handlePotentiallyDeadSuccessors(BI.getParent(),
 3794                                    BI.getSuccessor(!CI->getZExtValue()));
3795 return nullptr;
3796 }
3797
3798 // Replace all dominated uses of the condition with true/false
 3799  // Ignore constant expressions to avoid iterating over uses in other
3800 // functions.
3801 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
3802 for (auto &U : make_early_inc_range(Cond->uses())) {
3803 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
3804 if (DT.dominates(Edge0, U)) {
3805 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
3806 addToWorklist(cast<Instruction>(U.getUser()));
3807 continue;
3808 }
3809 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
3810 if (DT.dominates(Edge1, U)) {
3811 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
3812 addToWorklist(cast<Instruction>(U.getUser()));
3813 }
3814 }
3815 }
3816
3817 DC.registerBranch(&BI);
3818 return nullptr;
3819}
3820
3821// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
3822// we can prove that both (switch C) and (switch X) go to the default when cond
3823// is false/true.
 3824static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
 3825                                                SelectInst *Select,
 3826                                                bool IsTrueArm) {
3827 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3828 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
3829 if (!C)
3830 return nullptr;
3831
3832 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
3833 if (CstBB != SI.getDefaultDest())
3834 return nullptr;
3835 Value *X = Select->getOperand(3 - CstOpIdx);
3836 CmpPredicate Pred;
3837 const APInt *RHSC;
3838 if (!match(Select->getCondition(),
3839 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
3840 return nullptr;
3841 if (IsTrueArm)
3842 Pred = ICmpInst::getInversePredicate(Pred);
3843
3844 // See whether we can replace the select with X
 3845  ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
 3846  for (auto Case : SI.cases())
3847 if (!CR.contains(Case.getCaseValue()->getValue()))
3848 return nullptr;
3849
3850 return X;
3851}
3852
 3853Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
 3854  Value *Cond = SI.getCondition();
3855 Value *Op0;
3856 ConstantInt *AddRHS;
3857 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3858 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3859 for (auto Case : SI.cases()) {
3860 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3861 assert(isa<ConstantInt>(NewCase) &&
3862 "Result of expression should be constant");
3863 Case.setValue(cast<ConstantInt>(NewCase));
3864 }
3865 return replaceOperand(SI, 0, Op0);
3866 }
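  // Illustrative sketch (not from the original source) of the rewrite above:
  //
  //   %cond = add i32 %x, 4
  //   switch i32 %cond, label %default [ i32 1, label %bb ]
  // -->
  //   switch i32 %x, label %default [ i32 -3, label %bb ]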
3867
3868 ConstantInt *SubLHS;
3869 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
3870 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
3871 for (auto Case : SI.cases()) {
3872 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
3873 assert(isa<ConstantInt>(NewCase) &&
3874 "Result of expression should be constant");
3875 Case.setValue(cast<ConstantInt>(NewCase));
3876 }
3877 return replaceOperand(SI, 0, Op0);
3878 }
3879
3880 uint64_t ShiftAmt;
3881 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
3882 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
3883 all_of(SI.cases(), [&](const auto &Case) {
3884 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3885 })) {
3886 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
3887 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
3888 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
3889 Shl->hasOneUse()) {
3890 Value *NewCond = Op0;
3891 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
3892 // If the shift may wrap, we need to mask off the shifted bits.
3893 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
3894 NewCond = Builder.CreateAnd(
3895 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
3896 }
3897 for (auto Case : SI.cases()) {
3898 const APInt &CaseVal = Case.getCaseValue()->getValue();
3899 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
3900 : CaseVal.lshr(ShiftAmt);
3901 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3902 }
3903 return replaceOperand(SI, 0, NewCond);
3904 }
3905 }
3906
3907 // Fold switch(zext/sext(X)) into switch(X) if possible.
3908 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
3909 bool IsZExt = isa<ZExtInst>(Cond);
3910 Type *SrcTy = Op0->getType();
3911 unsigned NewWidth = SrcTy->getScalarSizeInBits();
3912
3913 if (all_of(SI.cases(), [&](const auto &Case) {
3914 const APInt &CaseVal = Case.getCaseValue()->getValue();
3915 return IsZExt ? CaseVal.isIntN(NewWidth)
3916 : CaseVal.isSignedIntN(NewWidth);
3917 })) {
3918 for (auto &Case : SI.cases()) {
3919 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3920 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3921 }
3922 return replaceOperand(SI, 0, Op0);
3923 }
3924 }
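  // Illustrative sketch (not from the original source): when every case value
  // fits in the source type, the extension is redundant.
  //
  //   %wide = zext i32 %x to i64
  //   switch i64 %wide, label %default [ i64 0, label %a
  //                                      i64 7, label %b ]
  // -->
  //   switch i32 %x, label %default [ i32 0, label %a
  //                                   i32 7, label %b ]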
3925
3926 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
3927 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
3928 if (Value *V =
3929 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
3930 return replaceOperand(SI, 0, V);
3931 if (Value *V =
3932 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
3933 return replaceOperand(SI, 0, V);
3934 }
3935
3936 KnownBits Known = computeKnownBits(Cond, 0, &SI);
3937 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3938 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3939
3940 // Compute the number of leading bits we can ignore.
3941 // TODO: A better way to determine this would use ComputeNumSignBits().
3942 for (const auto &C : SI.cases()) {
3943 LeadingKnownZeros =
3944 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3945 LeadingKnownOnes =
3946 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3947 }
3948
3949 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3950
3951 // Shrink the condition operand if the new type is smaller than the old type.
 3952  // But do not shrink to a non-standard type, because the backend can't generate
3953 // good code for that yet.
3954 // TODO: We can make it aggressive again after fixing PR39569.
3955 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3956 shouldChangeType(Known.getBitWidth(), NewWidth)) {
3957 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3959 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3960
3961 for (auto Case : SI.cases()) {
3962 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3963 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3964 }
3965 return replaceOperand(SI, 0, NewCond);
3966 }
3967
3968 if (isa<UndefValue>(Cond)) {
3969 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3970 return nullptr;
3971 }
3972 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3973 handlePotentiallyDeadSuccessors(SI.getParent(),
3974 SI.findCaseValue(CI)->getCaseSuccessor());
3975 return nullptr;
3976 }
3977
3978 return nullptr;
3979}
3980
 3981Instruction *
 3982InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3983 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3984 if (!WO)
3985 return nullptr;
3986
3987 Intrinsic::ID OvID = WO->getIntrinsicID();
3988 const APInt *C = nullptr;
3989 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
3990 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3991 OvID == Intrinsic::umul_with_overflow)) {
3992 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3993 if (C->isAllOnes())
3994 return BinaryOperator::CreateNeg(WO->getLHS());
3995 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
3996 if (C->isPowerOf2()) {
3997 return BinaryOperator::CreateShl(
3998 WO->getLHS(),
3999 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4000 }
4001 }
4002 }
4003
4004 // We're extracting from an overflow intrinsic. See if we're the only user.
4005 // That allows us to simplify multiple result intrinsics to simpler things
4006 // that just get one value.
4007 if (!WO->hasOneUse())
4008 return nullptr;
4009
4010 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4011 // and replace it with a traditional binary instruction.
4012 if (*EV.idx_begin() == 0) {
4013 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4014 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4015 // Replace the old instruction's uses with poison.
4016 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4018 return BinaryOperator::Create(BinOp, LHS, RHS);
4019 }
4020
4021 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4022
4023 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4024 if (OvID == Intrinsic::usub_with_overflow)
4025 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4026
4027 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4028 // +1 is not possible because we assume signed values.
4029 if (OvID == Intrinsic::smul_with_overflow &&
4030 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4031 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4032
4033 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4034 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4035 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4036 // Only handle even bitwidths for performance reasons.
4037 if (BitWidth % 2 == 0)
4038 return new ICmpInst(
4039 ICmpInst::ICMP_UGT, WO->getLHS(),
4040 ConstantInt::get(WO->getLHS()->getType(),
4042 }
4043
4044 // If only the overflow result is used, and the right hand side is a
4045 // constant (or constant splat), we can remove the intrinsic by directly
4046 // checking for overflow.
4047 if (C) {
4048 // Compute the no-wrap range for LHS given RHS=C, then construct an
4049 // equivalent icmp, potentially using an offset.
4051 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4052
4053 CmpInst::Predicate Pred;
4054 APInt NewRHSC, Offset;
4055 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4056 auto *OpTy = WO->getRHS()->getType();
4057 auto *NewLHS = WO->getLHS();
4058 if (Offset != 0)
4059 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4060 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4061 ConstantInt::get(OpTy, NewRHSC));
4062 }
4063
4064 return nullptr;
4065}
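// Illustrative sketch (not from the original source): when only the overflow
// bit of usub.with.overflow is used, it reduces to an unsigned compare.
//
//   %pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %ov   = extractvalue { i32, i1 } %pair, 1
// -->
//   %ov   = icmp ult i32 %a, %b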
4066
 4067Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
 4068  Value *Agg = EV.getAggregateOperand();
4069
4070 if (!EV.hasIndices())
4071 return replaceInstUsesWith(EV, Agg);
4072
4073 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4074 SQ.getWithInstruction(&EV)))
4075 return replaceInstUsesWith(EV, V);
4076
4077 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4078 // We're extracting from an insertvalue instruction, compare the indices
4079 const unsigned *exti, *exte, *insi, *inse;
4080 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4081 exte = EV.idx_end(), inse = IV->idx_end();
4082 exti != exte && insi != inse;
4083 ++exti, ++insi) {
4084 if (*insi != *exti)
4085 // The insert and extract both reference distinctly different elements.
4086 // This means the extract is not influenced by the insert, and we can
4087 // replace the aggregate operand of the extract with the aggregate
4088 // operand of the insert. i.e., replace
4089 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4090 // %E = extractvalue { i32, { i32 } } %I, 0
4091 // with
4092 // %E = extractvalue { i32, { i32 } } %A, 0
4093 return ExtractValueInst::Create(IV->getAggregateOperand(),
4094 EV.getIndices());
4095 }
4096 if (exti == exte && insi == inse)
4097 // Both iterators are at the end: Index lists are identical. Replace
4098 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4099 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4100 // with "i32 42"
4101 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4102 if (exti == exte) {
4103 // The extract list is a prefix of the insert list. i.e. replace
4104 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4105 // %E = extractvalue { i32, { i32 } } %I, 1
4106 // with
4107 // %X = extractvalue { i32, { i32 } } %A, 1
4108 // %E = insertvalue { i32 } %X, i32 42, 0
4109 // by switching the order of the insert and extract (though the
4110 // insertvalue should be left in, since it may have other uses).
4111 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4112 EV.getIndices());
4113 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4114 ArrayRef(insi, inse));
4115 }
4116 if (insi == inse)
4117 // The insert list is a prefix of the extract list
4118 // We can simply remove the common indices from the extract and make it
4119 // operate on the inserted value instead of the insertvalue result.
4120 // i.e., replace
4121 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4122 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4123 // with
4124 // %E extractvalue { i32 } { i32 42 }, 0
4125 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4126 ArrayRef(exti, exte));
4127 }
4128
4129 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4130 return R;
4131
4132 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4133 // Bail out if the aggregate contains scalable vector type
4134 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4135 STy && STy->isScalableTy())
4136 return nullptr;
4137
4138 // If the (non-volatile) load only has one use, we can rewrite this to a
4139 // load from a GEP. This reduces the size of the load. If a load is used
4140 // only by extractvalue instructions then this either must have been
4141 // optimized before, or it is a struct with padding, in which case we
4142 // don't want to do the transformation as it loses padding knowledge.
4143 if (L->isSimple() && L->hasOneUse()) {
4144 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4145 SmallVector<Value*, 4> Indices;
4146 // Prefix an i32 0 since we need the first element.
4147 Indices.push_back(Builder.getInt32(0));
4148 for (unsigned Idx : EV.indices())
4149 Indices.push_back(Builder.getInt32(Idx));
4150
4151 // We need to insert these at the location of the old load, not at that of
4152 // the extractvalue.
4154 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4155 L->getPointerOperand(), Indices);
 4157      // Whatever aliasing information we had for the original load must also
4158 // hold for the smaller load, so propagate the annotations.
4159 NL->setAAMetadata(L->getAAMetadata());
4160 // Returning the load directly will cause the main loop to insert it in
4161 // the wrong spot, so use replaceInstUsesWith().
4162 return replaceInstUsesWith(EV, NL);
4163 }
4164 }
4165
4166 if (auto *PN = dyn_cast<PHINode>(Agg))
4167 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4168 return Res;
4169
4170 // Canonicalize extract (select Cond, TV, FV)
4171 // -> select cond, (extract TV), (extract FV)
4172 if (auto *SI = dyn_cast<SelectInst>(Agg))
4173 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4174 return R;
4175
4176 // We could simplify extracts from other values. Note that nested extracts may
4177 // already be simplified implicitly by the above: extract (extract (insert) )
4178 // will be translated into extract ( insert ( extract ) ) first and then just
4179 // the value inserted, if appropriate. Similarly for extracts from single-use
4180 // loads: extract (extract (load)) will be translated to extract (load (gep))
4181 // and if again single-use then via load (gep (gep)) to load (gep).
4182 // However, double extracts from e.g. function arguments or return values
4183 // aren't handled yet.
4184 return nullptr;
4185}
4186
4187/// Return 'true' if the given typeinfo will match anything.
4188static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4189 switch (Personality) {
 4193    // The GCC C EH and Rust personalities only exist to support cleanups, so
4194 // it's not clear what the semantics of catch clauses are.
4195 return false;
4197 return false;
4199 // While __gnat_all_others_value will match any Ada exception, it doesn't
4200 // match foreign exceptions (or didn't, before gcc-4.7).
4201 return false;
4212 return TypeInfo->isNullValue();
4213 }
4214 llvm_unreachable("invalid enum");
4215}
4216
4217static bool shorter_filter(const Value *LHS, const Value *RHS) {
4218 return
4219 cast<ArrayType>(LHS->getType())->getNumElements()
4220 <
4221 cast<ArrayType>(RHS->getType())->getNumElements();
4222}
4223
 4224Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
 4225  // The logic here should be correct for any real-world personality function.
4226 // However if that turns out not to be true, the offending logic can always
4227 // be conditioned on the personality function, like the catch-all logic is.
4228 EHPersonality Personality =
4229 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4230
 4231  // Simplify the list of clauses, e.g. by removing repeated catch clauses
4232 // (these are often created by inlining).
4233 bool MakeNewInstruction = false; // If true, recreate using the following:
4234 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4235 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4236
4237 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4238 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4239 bool isLastClause = i + 1 == e;
4240 if (LI.isCatch(i)) {
4241 // A catch clause.
4242 Constant *CatchClause = LI.getClause(i);
4243 Constant *TypeInfo = CatchClause->stripPointerCasts();
4244
4245 // If we already saw this clause, there is no point in having a second
4246 // copy of it.
4247 if (AlreadyCaught.insert(TypeInfo).second) {
4248 // This catch clause was not already seen.
4249 NewClauses.push_back(CatchClause);
4250 } else {
4251 // Repeated catch clause - drop the redundant copy.
4252 MakeNewInstruction = true;
4253 }
4254
4255 // If this is a catch-all then there is no point in keeping any following
4256 // clauses or marking the landingpad as having a cleanup.
4257 if (isCatchAll(Personality, TypeInfo)) {
4258 if (!isLastClause)
4259 MakeNewInstruction = true;
4260 CleanupFlag = false;
4261 break;
4262 }
4263 } else {
4264 // A filter clause. If any of the filter elements were already caught
4265 // then they can be dropped from the filter. It is tempting to try to
4266 // exploit the filter further by saying that any typeinfo that does not
4267 // occur in the filter can't be caught later (and thus can be dropped).
4268 // However this would be wrong, since typeinfos can match without being
4269 // equal (for example if one represents a C++ class, and the other some
4270 // class derived from it).
4271 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4272 Constant *FilterClause = LI.getClause(i);
4273 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4274 unsigned NumTypeInfos = FilterType->getNumElements();
4275
4276 // An empty filter catches everything, so there is no point in keeping any
4277 // following clauses or marking the landingpad as having a cleanup. By
4278 // dealing with this case here the following code is made a bit simpler.
4279 if (!NumTypeInfos) {
4280 NewClauses.push_back(FilterClause);
4281 if (!isLastClause)
4282 MakeNewInstruction = true;
4283 CleanupFlag = false;
4284 break;
4285 }
4286
4287 bool MakeNewFilter = false; // If true, make a new filter.
4288 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4289 if (isa<ConstantAggregateZero>(FilterClause)) {
4290 // Not an empty filter - it contains at least one null typeinfo.
4291 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4292 Constant *TypeInfo =
 4293            Constant::getNullValue(FilterType->getElementType());
 4294        // If this typeinfo is a catch-all then the filter can never match.
4295 if (isCatchAll(Personality, TypeInfo)) {
4296 // Throw the filter away.
4297 MakeNewInstruction = true;
4298 continue;
4299 }
4300
4301 // There is no point in having multiple copies of this typeinfo, so
4302 // discard all but the first copy if there is more than one.
4303 NewFilterElts.push_back(TypeInfo);
4304 if (NumTypeInfos > 1)
4305 MakeNewFilter = true;
4306 } else {
4307 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4308 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4309 NewFilterElts.reserve(NumTypeInfos);
4310
4311 // Remove any filter elements that were already caught or that already
4312 // occurred in the filter. While there, see if any of the elements are
4313 // catch-alls. If so, the filter can be discarded.
4314 bool SawCatchAll = false;
4315 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4316 Constant *Elt = Filter->getOperand(j);
4317 Constant *TypeInfo = Elt->stripPointerCasts();
4318 if (isCatchAll(Personality, TypeInfo)) {
4319 // This element is a catch-all. Bail out, noting this fact.
4320 SawCatchAll = true;
4321 break;
4322 }
4323
4324 // Even if we've seen a type in a catch clause, we don't want to
4325 // remove it from the filter. An unexpected type handler may be
4326 // set up for a call site which throws an exception of the same
4327 // type caught. In order for the exception thrown by the unexpected
4328 // handler to propagate correctly, the filter must be correctly
4329 // described for the call site.
4330 //
4331 // Example:
4332 //
4333 // void unexpected() { throw 1;}
4334 // void foo() throw (int) {
4335 // std::set_unexpected(unexpected);
4336 // try {
4337 // throw 2.0;
4338 // } catch (int i) {}
4339 // }
4340
4341 // There is no point in having multiple copies of the same typeinfo in
4342 // a filter, so only add it if we didn't already.
4343 if (SeenInFilter.insert(TypeInfo).second)
4344 NewFilterElts.push_back(cast<Constant>(Elt));
4345 }
4346 // A filter containing a catch-all cannot match anything by definition.
4347 if (SawCatchAll) {
4348 // Throw the filter away.
4349 MakeNewInstruction = true;
4350 continue;
4351 }
4352
4353 // If we dropped something from the filter, make a new one.
4354 if (NewFilterElts.size() < NumTypeInfos)
4355 MakeNewFilter = true;
4356 }
4357 if (MakeNewFilter) {
4358 FilterType = ArrayType::get(FilterType->getElementType(),
4359 NewFilterElts.size());
4360 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4361 MakeNewInstruction = true;
4362 }
4363
4364 NewClauses.push_back(FilterClause);
4365
4366 // If the new filter is empty then it will catch everything so there is
4367 // no point in keeping any following clauses or marking the landingpad
4368 // as having a cleanup. The case of the original filter being empty was
4369 // already handled above.
4370 if (MakeNewFilter && !NewFilterElts.size()) {
4371 assert(MakeNewInstruction && "New filter but not a new instruction!");
4372 CleanupFlag = false;
4373 break;
4374 }
4375 }
4376 }
4377
4378 // If several filters occur in a row then reorder them so that the shortest
4379 // filters come first (those with the smallest number of elements). This is
4380 // advantageous because shorter filters are more likely to match, speeding up
4381 // unwinding, but mostly because it increases the effectiveness of the other
4382 // filter optimizations below.
4383 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4384 unsigned j;
4385 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4386 for (j = i; j != e; ++j)
4387 if (!isa<ArrayType>(NewClauses[j]->getType()))
4388 break;
4389
4390 // Check whether the filters are already sorted by length. We need to know
4391 // if sorting them is actually going to do anything so that we only make a
4392 // new landingpad instruction if it does.
4393 for (unsigned k = i; k + 1 < j; ++k)
4394 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4395 // Not sorted, so sort the filters now. Doing an unstable sort would be
4396 // correct too but reordering filters pointlessly might confuse users.
4397 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
 4398                         shorter_filter);
 4399        MakeNewInstruction = true;
4400 break;
4401 }
4402
4403 // Look for the next batch of filters.
4404 i = j + 1;
4405 }
4406
4407 // If typeinfos matched if and only if equal, then the elements of a filter L
4408 // that occurs later than a filter F could be replaced by the intersection of
4409 // the elements of F and L. In reality two typeinfos can match without being
4410 // equal (for example if one represents a C++ class, and the other some class
4411 // derived from it) so it would be wrong to perform this transform in general.
4412 // However the transform is correct and useful if F is a subset of L. In that
4413 // case L can be replaced by F, and thus removed altogether since repeating a
4414 // filter is pointless. So here we look at all pairs of filters F and L where
4415 // L follows F in the list of clauses, and remove L if every element of F is
4416 // an element of L. This can occur when inlining C++ functions with exception
4417 // specifications.
4418 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4419 // Examine each filter in turn.
4420 Value *Filter = NewClauses[i];
4421 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4422 if (!FTy)
4423 // Not a filter - skip it.
4424 continue;
4425 unsigned FElts = FTy->getNumElements();
4426 // Examine each filter following this one. Doing this backwards means that
4427 // we don't have to worry about filters disappearing under us when removed.
4428 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4429 Value *LFilter = NewClauses[j];
4430 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4431 if (!LTy)
4432 // Not a filter - skip it.
4433 continue;
4434 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4435 // an element of LFilter, then discard LFilter.
4436 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4437 // If Filter is empty then it is a subset of LFilter.
4438 if (!FElts) {
4439 // Discard LFilter.
4440 NewClauses.erase(J);
4441 MakeNewInstruction = true;
4442 // Move on to the next filter.
4443 continue;
4444 }
4445 unsigned LElts = LTy->getNumElements();
4446 // If Filter is longer than LFilter then it cannot be a subset of it.
4447 if (FElts > LElts)
4448 // Move on to the next filter.
4449 continue;
4450 // At this point we know that LFilter has at least one element.
4451 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4452 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4453 // already know that Filter is not longer than LFilter).
4454 if (isa<ConstantAggregateZero>(Filter)) {
4455 assert(FElts <= LElts && "Should have handled this case earlier!");
4456 // Discard LFilter.
4457 NewClauses.erase(J);
4458 MakeNewInstruction = true;
4459 }
4460 // Move on to the next filter.
4461 continue;
4462 }
4463 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4464 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4465 // Since Filter is non-empty and contains only zeros, it is a subset of
4466 // LFilter iff LFilter contains a zero.
4467 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4468 for (unsigned l = 0; l != LElts; ++l)
4469 if (LArray->getOperand(l)->isNullValue()) {
4470 // LFilter contains a zero - discard it.
4471 NewClauses.erase(J);
4472 MakeNewInstruction = true;
4473 break;
4474 }
4475 // Move on to the next filter.
4476 continue;
4477 }
4478 // At this point we know that both filters are ConstantArrays. Loop over
4479 // operands to see whether every element of Filter is also an element of
4480 // LFilter. Since filters tend to be short this is probably faster than
4481 // using a method that scales nicely.
4482 ConstantArray *FArray = cast<ConstantArray>(Filter);
4483 bool AllFound = true;
4484 for (unsigned f = 0; f != FElts; ++f) {
4485 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4486 AllFound = false;
4487 for (unsigned l = 0; l != LElts; ++l) {
4488 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4489 if (LTypeInfo == FTypeInfo) {
4490 AllFound = true;
4491 break;
4492 }
4493 }
4494 if (!AllFound)
4495 break;
4496 }
4497 if (AllFound) {
4498 // Discard LFilter.
4499 NewClauses.erase(J);
4500 MakeNewInstruction = true;
4501 }
4502 // Move on to the next filter.
4503 }
4504 }
4505
4506 // If we changed any of the clauses, replace the old landingpad instruction
4507 // with a new one.
4508 if (MakeNewInstruction) {
4510 NewClauses.size());
4511 for (Constant *C : NewClauses)
4512 NLI->addClause(C);
4513 // A landing pad with no clauses must have the cleanup flag set. It is
4514 // theoretically possible, though highly unlikely, that we eliminated all
4515 // clauses. If so, force the cleanup flag to true.
4516 if (NewClauses.empty())
4517 CleanupFlag = true;
4518 NLI->setCleanup(CleanupFlag);
4519 return NLI;
4520 }
4521
4522 // Even if none of the clauses changed, we may nonetheless have understood
4523 // that the cleanup flag is pointless. Clear it if so.
4524 if (LI.isCleanup() != CleanupFlag) {
4525 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4526 LI.setCleanup(CleanupFlag);
4527 return &LI;
4528 }
4529
4530 return nullptr;
4531}
4532
4533Value *
 4534InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
 4535  // Try to push freeze through instructions that propagate but don't produce
 4536  // poison as far as possible. If an operand of freeze satisfies three
 4537  // conditions: 1) it has one use, 2) it does not produce poison, and 3) all but
 4538  // one of its operands are guaranteed non-poison, then push the freeze through to the one
4539 // operand that is not guaranteed non-poison. The actual transform is as
4540 // follows.
 4541  //   Op1 = ...                        ; Op1 can be poison
 4542  //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and only one
 4543  //                                    ; operand that may be poison (Op1)
4544 // ... = Freeze(Op0)
4545 // =>
4546 // Op1 = ...
4547 // Op1.fr = Freeze(Op1)
4548 // ... = Inst(Op1.fr, NonPoisonOps...)
4549 auto *OrigOp = OrigFI.getOperand(0);
4550 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4551
4552 // While we could change the other users of OrigOp to use freeze(OrigOp), that
4553 // potentially reduces their optimization potential, so let's only do this iff
4554 // the OrigOp is only used by the freeze.
4555 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4556 return nullptr;
4557
4558 // We can't push the freeze through an instruction which can itself create
4559 // poison. If the only source of new poison is flags, we can simply
4560 // strip them (since we know the only use is the freeze and nothing can
4561 // benefit from them.)
4562 if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
4563 /*ConsiderFlagsAndMetadata*/ false))
4564 return nullptr;
4565
4566 // If operand is guaranteed not to be poison, there is no need to add freeze
4567 // to the operand. So we first find the operand that is not guaranteed to be
4568 // poison.
4569 Use *MaybePoisonOperand = nullptr;
4570 for (Use &U : OrigOpInst->operands()) {
4571 if (isa<MetadataAsValue>(U.get()) ||
4573 continue;
4574 if (!MaybePoisonOperand)
4575 MaybePoisonOperand = &U;
4576 else
4577 return nullptr;
4578 }
4579
4580 OrigOpInst->dropPoisonGeneratingAnnotations();
4581
4582 // If all operands are guaranteed to be non-poison, we can drop freeze.
4583 if (!MaybePoisonOperand)
4584 return OrigOp;
4585
4586 Builder.SetInsertPoint(OrigOpInst);
4587 auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
4588 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
4589
4590 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4591 return OrigOp;
4592}
4593
 4594Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
 4595                                                        PHINode *PN) {
4596 // Detect whether this is a recurrence with a start value and some number of
4597 // backedge values. We'll check whether we can push the freeze through the
4598 // backedge values (possibly dropping poison flags along the way) until we
4599 // reach the phi again. In that case, we can move the freeze to the start
4600 // value.
4601 Use *StartU = nullptr;
4603 for (Use &U : PN->incoming_values()) {
4604 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
4605 // Add backedge value to worklist.
4606 Worklist.push_back(U.get());
4607 continue;
4608 }
4609
4610 // Don't bother handling multiple start values.
4611 if (StartU)
4612 return nullptr;
4613 StartU = &U;
4614 }
4615
4616 if (!StartU || Worklist.empty())
4617 return nullptr; // Not a recurrence.
4618
4619 Value *StartV = StartU->get();
4620 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
4621 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
4622 // We can't insert freeze if the start value is the result of the
4623 // terminator (e.g. an invoke).
4624 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
4625 return nullptr;
4626
4629 while (!Worklist.empty()) {
4630 Value *V = Worklist.pop_back_val();
4631 if (!Visited.insert(V).second)
4632 continue;
4633
4634 if (Visited.size() > 32)
4635 return nullptr; // Limit the total number of values we inspect.
4636
4637 // Assume that PN is non-poison, because it will be after the transform.
4638 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
4639 continue;
4640
4641 Instruction *I = dyn_cast<Instruction>(V);
4642 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
4643 /*ConsiderFlagsAndMetadata*/ false))
4644 return nullptr;
4645
4646 DropFlags.push_back(I);
4647 append_range(Worklist, I->operands());
4648 }
4649
4650 for (Instruction *I : DropFlags)
4651 I->dropPoisonGeneratingAnnotations();
4652
4653 if (StartNeedsFreeze) {
4655 Value *FrozenStartV = Builder.CreateFreeze(StartV,
4656 StartV->getName() + ".fr");
4657 replaceUse(*StartU, FrozenStartV);
4658 }
4659 return replaceInstUsesWith(FI, PN);
4660}
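// Illustrative sketch (not from the original source): for a simple counter
// recurrence, the freeze can be moved to the start value.
//
//   loop:
//     %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw nsw i32 %iv, 1
//     %fr = freeze i32 %iv
// -->
//   entry:
//     %start.fr = freeze i32 %start
//   loop:
//     %iv = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, 1      ; poison-generating flags dropped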
4661
 4662bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
 4663  Value *Op = FI.getOperand(0);
4664
4665 if (isa<Constant>(Op) || Op->hasOneUse())
4666 return false;
4667
4668 // Move the freeze directly after the definition of its operand, so that
4669 // it dominates the maximum number of uses. Note that it may not dominate
4670 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
4671 // the normal/default destination. This is why the domination check in the
4672 // replacement below is still necessary.
4673 BasicBlock::iterator MoveBefore;
4674 if (isa<Argument>(Op)) {
4675 MoveBefore =
4677 } else {
4678 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
4679 if (!MoveBeforeOpt)
4680 return false;
4681 MoveBefore = *MoveBeforeOpt;
4682 }
4683
4684 // Don't move to the position of a debug intrinsic.
4685 if (isa<DbgInfoIntrinsic>(MoveBefore))
4686 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4687 // Re-point iterator to come after any debug-info records, if we're
4688 // running in "RemoveDIs" mode
4689 MoveBefore.setHeadBit(false);
4690
4691 bool Changed = false;
4692 if (&FI != &*MoveBefore) {
4693 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
4694 Changed = true;
4695 }
4696
4697 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
4698 bool Dominates = DT.dominates(&FI, U);
4699 Changed |= Dominates;
4700 return Dominates;
4701 });
4702
4703 return Changed;
4704}
4705
4706// Check if any direct or bitcast user of this value is a shuffle instruction.
4707 static bool isUsedWithinShuffleVector(Value *V) {
4708 for (auto *U : V->users()) {
4709 if (isa<ShuffleVectorInst>(U))
4710 return true;
4711 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
4712 return true;
4713 }
4714 return false;
4715}
4716
4717 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
4718 Value *Op0 = I.getOperand(0);
4719
4720 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
4721 return replaceInstUsesWith(I, V);
4722
4723 // freeze (phi const, x) --> phi const, (freeze x)
4724 if (auto *PN = dyn_cast<PHINode>(Op0)) {
4725 if (Instruction *NV = foldOpIntoPhi(I, PN))
4726 return NV;
4727 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
4728 return NV;
4729 }
4730
4731 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
4732 return replaceInstUsesWith(I, NI);
4733
4734 // If I is freeze(undef), check its uses and fold it to a fixed constant.
4735 // - or: pick -1
4736 // - select's condition: if the true value is constant, choose it by making
4737 // the condition true.
4738 // - default: pick 0
4739 //
4740 // Note that this transform is intentionally done here rather than
4741 // via an analysis in InstSimplify or at individual user sites. That is
4742 // because we must produce the same value for all uses of the freeze -
4743 // it's the reason "freeze" exists!
4744 //
4745 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
4746 // duplicating logic for binops at least.
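// Illustrative examples (not from the original source):
//   %f = freeze i32 undef            ; all users are 'or' -> fold to -1
//   %o = or i32 %f, %a
// and
//   %f = freeze i1 undef             ; select condition with a constant
//   %s = select i1 %f, i32 7, i32 %b ; true arm -> fold to 'true'
// If the users disagree on a preferred constant, 0 is used for all of them.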
4747 auto getUndefReplacement = [&I](Type *Ty) {
4748 Constant *BestValue = nullptr;
4749 Constant *NullValue = Constant::getNullValue(Ty);
4750 for (const auto *U : I.users()) {
4751 Constant *C = NullValue;
4752 if (match(U, m_Or(m_Value(), m_Value())))
4754 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4755 C = ConstantInt::getTrue(Ty);
4756
4757 if (!BestValue)
4758 BestValue = C;
4759 else if (BestValue != C)
4760 BestValue = NullValue;
4761 }
4762 assert(BestValue && "Must have at least one use");
4763 return BestValue;
4764 };
4765
4766 if (match(Op0, m_Undef())) {
4767 // Don't fold freeze(undef/poison) if it's used as a vector operand in
4768 // a shuffle. This may improve codegen for shuffles that allow
4769 // unspecified inputs.
4770 if (isUsedWithinShuffleVector(&I))
4771 return nullptr;
4772 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4773 }
4774
4775 Constant *C;
4776 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4777 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
4778 return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
4779 }
4780
4781 // Replace uses of Op with freeze(Op).
4782 if (freezeOtherUses(I))
4783 return &I;
4784
4785 return nullptr;
4786}
4787
4788/// Check for case where the call writes to an otherwise dead alloca. This
4789/// shows up for unused out-params in idiomatic C/C++ code. Note that this
4790/// helper *only* analyzes the write; doesn't check any other legality aspect.
4791 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
4792 auto *CB = dyn_cast<CallBase>(I);
4793 if (!CB)
4794 // TODO: handle e.g. store to alloca here - only worth doing if we extend
4795 // to allow reload along used path as described below. Otherwise, this
4796 // is simply a store to a dead allocation which will be removed.
4797 return false;
4798 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4799 if (!Dest)
4800 return false;
4801 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4802 if (!AI)
4803 // TODO: allow malloc?
4804 return false;
4805 // TODO: allow memory access dominated by move point? Note that since AI
4806 // could have a reference to itself captured by the call, we would need to
4807 // account for cycles in doing so.
4808 SmallVector<const User *> AllocaUsers;
4809 SmallPtrSet<const User *, 4> Visited;
4810 auto pushUsers = [&](const Instruction &I) {
4811 for (const User *U : I.users()) {
4812 if (Visited.insert(U).second)
4813 AllocaUsers.push_back(U);
4814 }
4815 };
4816 pushUsers(*AI);
4817 while (!AllocaUsers.empty()) {
4818 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4819 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4820 pushUsers(*UserI);
4821 continue;
4822 }
4823 if (UserI == CB)
4824 continue;
4825 // TODO: support lifetime.start/end here
4826 return false;
4827 }
4828 return true;
4829}
4830
4831/// Try to move the specified instruction from its current block into the
4832/// beginning of DestBlock, which can only happen if it's safe to move the
4833/// instruction past all of the instructions between it and the end of its
4834/// block.
4835 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
4836 BasicBlock *DestBlock) {
4837 BasicBlock *SrcBlock = I->getParent();
4838
4839 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
4840 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4841 I->isTerminator())
4842 return false;
4843
4844 // Do not sink static or dynamic alloca instructions. Static allocas must
4845 // remain in the entry block, and dynamic allocas must not be sunk in between
4846 // a stacksave / stackrestore pair, which would incorrectly shorten its
4847 // lifetime.
4848 if (isa<AllocaInst>(I))
4849 return false;
4850
4851 // Do not sink into catchswitch blocks.
4852 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4853 return false;
4854
4855 // Do not sink convergent call instructions.
4856 if (auto *CI = dyn_cast<CallInst>(I)) {
4857 if (CI->isConvergent())
4858 return false;
4859 }
4860
4861 // Unless we can prove that the memory write isn't visible except on the
4862 // path we're sinking to, we must bail.
4863 if (I->mayWriteToMemory()) {
4864 if (!SoleWriteToDeadLocal(I, TLI))
4865 return false;
4866 }
4867
4868 // We can only sink load instructions if there is nothing between the load and
4869 // the end of block that could change the value.
4870 if (I->mayReadFromMemory() &&
4871 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
4872 // We don't want to do any sophisticated alias analysis, so we only check
4873 // the instructions after I in I's parent block if we try to sink to its
4874 // successor block.
4875 if (DestBlock->getUniquePredecessor() != I->getParent())
4876 return false;
4877 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4878 E = I->getParent()->end();
4879 Scan != E; ++Scan)
4880 if (Scan->mayWriteToMemory())
4881 return false;
4882 }
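// Illustrative example of the restriction above (not from the original
// source): a load must not be sunk past a possibly-aliasing store, e.g.
//   %v = load i32, ptr %p
//   store i32 0, ptr %q   ; may alias %p, so sinking %v below it is unsafe
// hence the scan over the instructions that follow I in its own block.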
4883
4884 I->dropDroppableUses([&](const Use *U) {
4885 auto *I = dyn_cast<Instruction>(U->getUser());
4886 if (I && I->getParent() != DestBlock) {
4887 Worklist.add(I);
4888 return true;
4889 }
4890 return false;
4891 });
4892 /// FIXME: We could remove droppable uses that are not dominated by
4893 /// the new position.
4894
4895 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4896 I->moveBefore(*DestBlock, InsertPos);
4897 ++NumSunkInst;
4898
4899 // Also sink all related debug uses from the source basic block. Otherwise we
4900 // get debug use before the def. Attempt to salvage debug uses first, to
4901 // maximise the range over which variables have a location. If we cannot salvage, then
4902 // mark the location undef: we know it was supposed to receive a new location
4903 // here, but that computation has been sunk.
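// For example (illustrative only): if %v is sunk out of this block, a
// trailing #dbg_value(%v, ...) left behind would be a use before the new
// definition point; it is therefore salvaged in terms of %v's operands,
// cloned next to the sunk instruction, or, failing both, set to undef.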
4904 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
4905 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
4906 findDbgUsers(DbgUsers, I, &DbgVariableRecords);
4907 if (!DbgUsers.empty())
4908 tryToSinkInstructionDbgValues(I, InsertPos, SrcBlock, DestBlock, DbgUsers);
4909 if (!DbgVariableRecords.empty())
4910 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
4911 DbgVariableRecords);
4912
4913 // PS: there are numerous flaws with this behaviour, not least that right now
4914 // assignments can be re-ordered past other assignments to the same variable
4915 // if they use different Values. Creating more undef assignments can never be
4916 // undone, and salvaging all users outside of this block can unnecessarily
4917 // alter the lifetime of the live-value that the variable refers to.
4918 // Some of these things can be resolved by tolerating debug use-before-defs in
4919 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
4920 // being used for more architectures.
4921
4922 return true;
4923}
4924
4925 void InstCombinerImpl::tryToSinkInstructionDbgValues(
4926 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4927 BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers) {
4928 // For all debug values in the destination block, the sunk instruction
4929 // will still be available, so they do not need to be dropped.
4930 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
4931 for (auto &DbgUser : DbgUsers)
4932 if (DbgUser->getParent() != DestBlock)
4933 DbgUsersToSalvage.push_back(DbgUser);
4934
4935 // Process the sinking DbgUsersToSalvage in reverse order, as we only want
4936 // to clone the last appearing debug intrinsic for each given variable.
4937 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
4938 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4939 if (DVI->getParent() == SrcBlock)
4940 DbgUsersToSink.push_back(DVI);
4941 llvm::sort(DbgUsersToSink,
4942 [](auto *A, auto *B) { return B->comesBefore(A); });
4943
4945 SmallSet<DebugVariable, 4> SunkVariables;
4946 for (auto *User : DbgUsersToSink) {
4947 // A dbg.declare instruction should not be cloned, since there can only be
4948 // one per variable fragment. It should be left in the original place
4949 // because the sunk instruction is not an alloca (otherwise we could not be
4950 // here).
4951 if (isa<DbgDeclareInst>(User))
4952 continue;
4953
4954 DebugVariable DbgUserVariable =
4955 DebugVariable(User->getVariable(), User->getExpression(),
4956 User->getDebugLoc()->getInlinedAt());
4957
4958 if (!SunkVariables.insert(DbgUserVariable).second)
4959 continue;
4960
4961 // Leave dbg.assign intrinsics in their original positions and there should
4962 // be no need to insert a clone.
4963 if (isa<DbgAssignIntrinsic>(User))
4964 continue;
4965
4966 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
4967 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
4968 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
4969 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
4970 }
4971
4972 // Perform salvaging without the clones, then sink the clones.
4973 if (!DIIClones.empty()) {
4974 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, {});
4975 // The clones are in reverse order of original appearance, reverse again to
4976 // maintain the original order.
4977 for (auto &DIIClone : llvm::reverse(DIIClones)) {
4978 DIIClone->insertBefore(&*InsertPos);
4979 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
4980 }
4981 }
4982}
4983
4984 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
4985 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4986 BasicBlock *DestBlock,
4987 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
4988 // Implementation of tryToSinkInstructionDbgValues, but for the
4989 // DbgVariableRecord of variable assignments rather than dbg.values.
4990
4991 // Fetch all DbgVariableRecords not already in the destination.
4992 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
4993 for (auto &DVR : DbgVariableRecords)
4994 if (DVR->getParent() != DestBlock)
4995 DbgVariableRecordsToSalvage.push_back(DVR);
4996
4997 // Fetch a second collection, of DbgVariableRecords in the source block that
4998 // we're going to sink.
4999 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5000 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5001 if (DVR->getParent() == SrcBlock)
5002 DbgVariableRecordsToSink.push_back(DVR);
5003
5004 // Sort DbgVariableRecords according to their position in the block. This is a
5005 // partial order: DbgVariableRecords attached to different instructions will
5006 // be ordered by the instruction order, but DbgVariableRecords attached to the
5007 // same instruction won't have an order.
5008 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5009 return B->getInstruction()->comesBefore(A->getInstruction());
5010 };
5011 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5012
5013 // If there are two assignments to the same variable attached to the same
5014 // instruction, the ordering between the two assignments is important. Scan
5015 // for this (rare) case and establish which is the last assignment.
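// Illustrative case (not from the original source): two assignments to the
// same variable attached to one instruction, e.g. "x = 1" immediately
// followed by "x = 2"; only the final "x = 2" may survive the sink.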
5016 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5018 if (DbgVariableRecordsToSink.size() > 1) {
5020 // Count how many assignments to each variable there is per instruction.
5021 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5022 DebugVariable DbgUserVariable =
5023 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5024 DVR->getDebugLoc()->getInlinedAt());
5025 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5026 }
5027
5028 // If there are any instructions with two assignments, add them to the
5029 // FilterOutMap to record that they need extra filtering.
5031 for (auto It : CountMap) {
5032 if (It.second > 1) {
5033 FilterOutMap[It.first] = nullptr;
5034 DupSet.insert(It.first.first);
5035 }
5036 }
5037
5038 // For all instruction/variable pairs needing extra filtering, find the
5039 // latest assignment.
5040 for (const Instruction *Inst : DupSet) {
5041 for (DbgVariableRecord &DVR :
5042 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5043 DebugVariable DbgUserVariable =
5044 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5045 DVR.getDebugLoc()->getInlinedAt());
5046 auto FilterIt =
5047 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5048 if (FilterIt == FilterOutMap.end())
5049 continue;
5050 if (FilterIt->second != nullptr)
5051 continue;
5052 FilterIt->second = &DVR;
5053 }
5054 }
5055 }
5056
5057 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5058 // out any duplicate assignments identified above.
5060 SmallSet<DebugVariable, 4> SunkVariables;
5061 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5063 continue;
5064
5065 DebugVariable DbgUserVariable =
5066 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5067 DVR->getDebugLoc()->getInlinedAt());
5068
5069 // For any variable where there were multiple assignments in the same place,
5070 // ignore all but the last assignment.
5071 if (!FilterOutMap.empty()) {
5072 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5073 auto It = FilterOutMap.find(IVP);
5074
5075 // Filter out.
5076 if (It != FilterOutMap.end() && It->second != DVR)
5077 continue;
5078 }
5079
5080 if (!SunkVariables.insert(DbgUserVariable).second)
5081 continue;
5082
5083 if (DVR->isDbgAssign())
5084 continue;
5085
5086 DVRClones.emplace_back(DVR->clone());
5087 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5088 }
5089
5090 // Perform salvaging without the clones, then sink the clones.
5091 if (DVRClones.empty())
5092 return;
5093
5094 salvageDebugInfoForDbgValues(*I, {}, DbgVariableRecordsToSalvage);
5095
5096 // The clones are in reverse order of original appearance. Assert that the
5097 // head bit is set on the iterator as we _should_ have received it via
5098 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5099 // we'll repeatedly insert at the head, such as:
5100 // DVR-3 (third insertion goes here)
5101 // DVR-2 (second insertion goes here)
5102 // DVR-1 (first insertion goes here)
5103 // Any-Prior-DVRs
5104 // InsertPtInst
5105 assert(InsertPos.getHeadBit());
5106 for (DbgVariableRecord *DVRClone : DVRClones) {
5107 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5108 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5109 }
5110}
5111
5112 bool InstCombinerImpl::run() {
5113 while (!Worklist.isEmpty()) {
5114 // Walk deferred instructions in reverse order, and push them to the
5115 // worklist, which means they'll end up popped from the worklist in-order.
5116 while (Instruction *I = Worklist.popDeferred()) {
5117 // Check to see if we can DCE the instruction. We do this already here to
5118 // reduce the number of uses and thus allow other folds to trigger.
5119 // Note that eraseInstFromFunction() may push additional instructions on
5120 // the deferred worklist, so this will DCE whole instruction chains.
5121 if (isInstructionTriviallyDead(I, &TLI)) {
5122 eraseInstFromFunction(*I);
5123 ++NumDeadInst;
5124 continue;
5125 }
5126
5127 Worklist.push(I);
5128 }
5129
5130 Instruction *I = Worklist.removeOne();
5131 if (I == nullptr) continue; // skip null values.
5132
5133 // Check to see if we can DCE the instruction.
5134 if (isInstructionTriviallyDead(I, &TLI)) {
5135 eraseInstFromFunction(*I);
5136 ++NumDeadInst;
5137 continue;
5138 }
5139
5140 if (!DebugCounter::shouldExecute(VisitCounter))
5141 continue;
5142
5143 // See if we can trivially sink this instruction to its user if we can
5144 // prove that the successor is not executed more frequently than our block.
5145 // Return the UserBlock if successful.
5146 auto getOptionalSinkBlockForInst =
5147 [this](Instruction *I) -> std::optional<BasicBlock *> {
5148 if (!EnableCodeSinking)
5149 return std::nullopt;
5150
5151 BasicBlock *BB = I->getParent();
5152 BasicBlock *UserParent = nullptr;
5153 unsigned NumUsers = 0;
5154
5155 for (Use &U : I->uses()) {
5156 User *User = U.getUser();
5157 if (User->isDroppable())
5158 continue;
5159 if (NumUsers > MaxSinkNumUsers)
5160 return std::nullopt;
5161
5162 Instruction *UserInst = cast<Instruction>(User);
5163 // Special handling for Phi nodes - get the block the use occurs in.
5164 BasicBlock *UserBB = UserInst->getParent();
5165 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5166 UserBB = PN->getIncomingBlock(U);
5167 // Bail out if we have uses in different blocks. We don't do any
5168 // sophisticated analysis (i.e. finding the NearestCommonDominator of these
5169 // use blocks).
5170 if (UserParent && UserParent != UserBB)
5171 return std::nullopt;
5172 UserParent = UserBB;
5173
5174 // Make sure these checks are done only once; naturally, we do them the
5175 // first time we get the UserParent, which saves compile time.
5176 if (NumUsers == 0) {
5177 // Try sinking to another block. If that block is unreachable, then do
5178 // not bother. SimplifyCFG should handle it.
5179 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5180 return std::nullopt;
5181
5182 auto *Term = UserParent->getTerminator();
5183 // See if the user is one of our successors that has only one
5184 // predecessor, so that we don't have to split the critical edge.
5185 // terminator that does not pass control to another block (such as
5186 // terminator that does not pass control to other block (such as
5187 // return or unreachable or resume). In this case:
5188 // - I dominates the User (by SSA form);
5189 // - the User will be executed at most once.
5190 // So sinking I down to User is always profitable or neutral.
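// Illustrative shapes where sinking is considered (not from the original
// source): either UserParent has BB as its unique predecessor (no critical
// edge to split), or UserParent ends in 'ret'/'unreachable'/'resume', in
// which case the user executes at most once.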
5191 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5192 return std::nullopt;
5193
5194 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5195 }
5196
5197 NumUsers++;
5198 }
5199
5200 // No user or only has droppable users.
5201 if (!UserParent)
5202 return std::nullopt;
5203
5204 return UserParent;
5205 };
5206
5207 auto OptBB = getOptionalSinkBlockForInst(I);
5208 if (OptBB) {
5209 auto *UserParent = *OptBB;
5210 // Okay, the CFG is simple enough, try to sink this instruction.
5211 if (tryToSinkInstruction(I, UserParent)) {
5212 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5213 MadeIRChange = true;
5214 // We'll add uses of the sunk instruction below, but since
5215 // sinking can expose opportunities for its *operands*, add
5216 // them to the worklist.
5217 for (Use &U : I->operands())
5218 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5219 Worklist.push(OpI);
5220 }
5221 }
5222
5223 // Now that we have an instruction, try combining it to simplify it.
5224 Builder.SetInsertPoint(I);
5225 Builder.CollectMetadataToCopy(
5226 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5227
5228#ifndef NDEBUG
5229 std::string OrigI;
5230#endif
5231 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5232 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5233
5234 if (Instruction *Result = visit(*I)) {
5235 ++NumCombined;
5236 // Should we replace the old instruction with a new one?
5237 if (Result != I) {
5238 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5239 << " New = " << *Result << '\n');
5240
5241 // We copy the old instruction's DebugLoc to the new instruction, unless
5242 // InstCombine already assigned a DebugLoc to it, in which case we
5243 // should trust the more specifically selected DebugLoc.
5244 if (!Result->getDebugLoc())
5245 Result->setDebugLoc(I->getDebugLoc());
5246 // We also copy annotation metadata to the new instruction.
5247 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5248 // Everything uses the new instruction now.
5249 I->replaceAllUsesWith(Result);
5250
5251 // Move the name to the new instruction first.
5252 Result->takeName(I);
5253
5254 // Insert the new instruction into the basic block...
5255 BasicBlock *InstParent = I->getParent();
5256 BasicBlock::iterator InsertPos = I->getIterator();
5257
5258 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5259 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5260 // We need to fix up the insertion point.
5261 if (isa<PHINode>(I)) // PHI -> Non-PHI
5262 InsertPos = InstParent->getFirstInsertionPt();
5263 else // Non-PHI -> PHI
5264 InsertPos = InstParent->getFirstNonPHIIt();
5265 }
5266
5267 Result->insertInto(InstParent, InsertPos);
5268
5269 // Push the new instruction and any users onto the worklist.
5270 Worklist.pushUsersToWorkList(*Result);
5271 Worklist.push(Result);
5272
5273 eraseInstFromFunction(*I);
5274 } else {
5275 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5276 << " New = " << *I << '\n');
5277
5278 // If the instruction was modified, it's possible that it is now dead.
5279 // If so, remove it.
5280 if (isInstructionTriviallyDead(I, &TLI)) {
5281 eraseInstFromFunction(*I);
5282 } else {
5283 Worklist.pushUsersToWorkList(*I);
5284 Worklist.push(I);
5285 }
5286 }
5287 MadeIRChange = true;
5288 }
5289 }
5290
5291 Worklist.zap();
5292 return MadeIRChange;
5293}
5294
5295// Track the scopes used by !alias.scope and !noalias. In a function, a
5296// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5297// by both sets. If not, the declaration of the scope can be safely omitted.
5298// The MDNode of the scope can be omitted as well for the instructions that are
5299// part of this function. We do not do that at this point, as this might become
5300// too time consuming to do.
5301 class AliasScopeTracker {
5302 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5303 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5304
5305public:
5306 void analyse(Instruction *I) {
5307 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5308 if (!I->hasMetadataOtherThanDebugLoc())
5309 return;
5310
5311 auto Track = [](Metadata *ScopeList, auto &Container) {
5312 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5313 if (!MDScopeList || !Container.insert(MDScopeList).second)
5314 return;
5315 for (const auto &MDOperand : MDScopeList->operands())
5316 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5317 Container.insert(MDScope);
5318 };
5319
5320 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5321 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5322 }
5323
5324 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5325 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5326 if (!Decl)
5327 return false;
5328
5329 assert(Decl->use_empty() &&
5330 "llvm.experimental.noalias.scope.decl in use ?");
5331 const MDNode *MDSL = Decl->getScopeList();
5332 assert(MDSL->getNumOperands() == 1 &&
5333 "llvm.experimental.noalias.scope should refer to a single scope");
5334 auto &MDOperand = MDSL->getOperand(0);
5335 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5336 return !UsedAliasScopesAndLists.contains(MD) ||
5337 !UsedNoAliasScopesAndLists.contains(MD);
5338
5339 // Not an MDNode? Throw it away.
5340 return true;
5341 }
5342};
5343
5344/// Populate the IC worklist from a function, by walking it in reverse
5345/// post-order and adding all reachable code to the worklist.
5346///
5347/// This has a couple of tricks to make the code faster and more powerful. In
5348/// particular, we constant fold and DCE instructions as we go, to avoid adding
5349/// them to the worklist (this significantly speeds up instcombine on code where
5350/// many instructions are dead or constant). Additionally, if we find a branch
5351/// whose condition is a known constant, we only visit the reachable successors.
5352 bool InstCombinerImpl::prepareWorklist(Function &F) {
5353 bool MadeIRChange = false;
5354 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5355 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5356 DenseMap<Constant *, Constant *> FoldedConstants;
5357 AliasScopeTracker SeenAliasScopes;
5358
5359 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5360 for (BasicBlock *Succ : successors(BB))
5361 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5362 for (PHINode &PN : Succ->phis())
5363 for (Use &U : PN.incoming_values())
5364 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5365 U.set(PoisonValue::get(PN.getType()));
5366 MadeIRChange = true;
5367 }
5368 };
5369
5370 for (BasicBlock *BB : RPOT) {
5371 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5372 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5373 })) {
5374 HandleOnlyLiveSuccessor(BB, nullptr);
5375 continue;
5376 }
5377 LiveBlocks.insert(BB);
5378
5379 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5380 // ConstantProp instruction if trivially constant.
5381 if (!Inst.use_empty() &&
5382 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5383 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5384 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5385 << '\n');
5386 Inst.replaceAllUsesWith(C);
5387 ++NumConstProp;
5388 if (isInstructionTriviallyDead(&Inst, &TLI))
5389 Inst.eraseFromParent();
5390 MadeIRChange = true;
5391 continue;
5392 }
5393
5394 // See if we can constant fold its operands.
5395 for (Use &U : Inst.operands()) {
5396 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5397 continue;
5398
5399 auto *C = cast<Constant>(U);
5400 Constant *&FoldRes = FoldedConstants[C];
5401 if (!FoldRes)
5402 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5403
5404 if (FoldRes != C) {
5405 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5406 << "\n Old = " << *C
5407 << "\n New = " << *FoldRes << '\n');
5408 U = FoldRes;
5409 MadeIRChange = true;
5410 }
5411 }
5412
5413 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5414 // these call instructions consumes non-trivial amount of time and
5415 // provides no value for the optimization.
5416 if (!Inst.isDebugOrPseudoInst()) {
5417 InstrsForInstructionWorklist.push_back(&Inst);
5418 SeenAliasScopes.analyse(&Inst);
5419 }
5420 }
5421
5422 // If this is a branch or switch on a constant, mark only the single
5423 // live successor. Otherwise assume all successors are live.
5424 Instruction *TI = BB->getTerminator();
5425 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5426 if (isa<UndefValue>(BI->getCondition())) {
5427 // Branch on undef is UB.
5428 HandleOnlyLiveSuccessor(BB, nullptr);
5429 continue;
5430 }
5431 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5432 bool CondVal = Cond->getZExtValue();
5433 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5434 continue;
5435 }
5436 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5437 if (isa<UndefValue>(SI->getCondition())) {
5438 // Switch on undef is UB.
5439 HandleOnlyLiveSuccessor(BB, nullptr);
5440 continue;
5441 }
5442 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5443 HandleOnlyLiveSuccessor(BB,
5444 SI->findCaseValue(Cond)->getCaseSuccessor());
5445 continue;
5446 }
5447 }
5448 }
5449
5450 // Remove instructions inside unreachable blocks. This prevents the
5451 // instcombine code from having to deal with some bad special cases, and
5452 // reduces use counts of instructions.
5453 for (BasicBlock &BB : F) {
5454 if (LiveBlocks.count(&BB))
5455 continue;
5456
5457 unsigned NumDeadInstInBB;
5458 unsigned NumDeadDbgInstInBB;
5459 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5461
5462 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5463 NumDeadInst += NumDeadInstInBB;
5464 }
5465
5466 // Once we've found all of the instructions to add to instcombine's worklist,
5467 // add them in reverse order. This way instcombine will visit from the top
5468 // of the function down. This jives well with the way that it adds all uses
5469 // of instructions to the worklist after doing a transformation, thus avoiding
5470 // some N^2 behavior in pathological cases.
5471 Worklist.reserve(InstrsForInstructionWorklist.size());
5472 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5473 // DCE instruction if trivially dead. As we iterate in reverse program
5474 // order here, we will clean up whole chains of dead instructions.
5475 if (isInstructionTriviallyDead(Inst, &TLI) ||
5476 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5477 ++NumDeadInst;
5478 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5479 salvageDebugInfo(*Inst);
5480 Inst->eraseFromParent();
5481 MadeIRChange = true;
5482 continue;
5483 }
5484
5485 Worklist.push(Inst);
5486 }
5487
5488 return MadeIRChange;
5489}
5490
5491 void InstCombiner::computeBackEdges() {
5492 // Collect backedges.
5494 for (BasicBlock *BB : RPOT) {
5495 Visited.insert(BB);
5496 for (BasicBlock *Succ : successors(BB))
5497 if (Visited.contains(Succ))
5498 BackEdges.insert({BB, Succ});
5499 }
5500 ComputedBackEdges = true;
5501}
5502
5503 static bool combineInstructionsOverFunction(
5504 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5505 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5506 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5507 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5508 const InstCombineOptions &Opts) {
5509 auto &DL = F.getDataLayout();
5510 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5511 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5512
5513 /// Builder - This is an IRBuilder that automatically inserts new
5514 /// instructions into the worklist when they are created.
5515 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
5516 F.getContext(), TargetFolder(DL),
5517 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5518 Worklist.add(I);
5519 if (auto *Assume = dyn_cast<AssumeInst>(I))
5520 AC.registerAssumption(Assume);
5521 }));
5522
5523 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5524
5525 // Lower dbg.declare intrinsics otherwise their value may be clobbered
5526 // by instcombiner.
5527 bool MadeIRChange = false;
5528 if (ShouldLowerDbgDeclare)
5529 MadeIRChange = LowerDbgDeclare(F);
5530
5531 // Iterate while there is work to do.
5532 unsigned Iteration = 0;
5533 while (true) {
5534 ++Iteration;
5535
5536 if (Iteration > Opts.MaxIterations && !VerifyFixpoint) {
5537 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5538 << " on " << F.getName()
5539 << " reached; stopping without verifying fixpoint\n");
5540 break;
5541 }
5542
5543 ++NumWorklistIterations;
5544 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5545 << F.getName() << "\n");
5546
5547 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5548 ORE, BFI, BPI, PSI, DL, RPOT);
5550 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5551 MadeChangeInThisIteration |= IC.run();
5552 if (!MadeChangeInThisIteration)
5553 break;
5554
5555 MadeIRChange = true;
5556 if (Iteration > Opts.MaxIterations) {
5557 report_fatal_error(
5558 "Instruction Combining on " + Twine(F.getName()) +
5559 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5560 " iterations. " +
5561 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5562 "'instcombine-no-verify-fixpoint' to suppress this error.",
5563 /*GenCrashDiag=*/false);
5564 }
5565 }
5566
5567 if (Iteration == 1)
5568 ++NumOneIteration;
5569 else if (Iteration == 2)
5570 ++NumTwoIterations;
5571 else if (Iteration == 3)
5572 ++NumThreeIterations;
5573 else
5574 ++NumFourOrMoreIterations;
5575
5576 return MadeIRChange;
5577}
5578
5580
5581 void InstCombinePass::printPipeline(
5582 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5583 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5584 OS, MapClassName2PassName);
5585 OS << '<';
5586 OS << "max-iterations=" << Options.MaxIterations << ";";
5587 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5588 OS << '>';
5589}
5590
5591char InstCombinePass::ID = 0;
5592
5593 PreservedAnalyses InstCombinePass::run(Function &F,
5594 FunctionAnalysisManager &AM) {
5595 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5596 // No changes since last InstCombine pass, exit early.
5597 if (LRT.shouldSkip(&ID))
5598 return PreservedAnalyses::all();
5599
5600 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5601 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5602 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5603 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5604 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5605
5606 auto *AA = &AM.getResult<AAManager>(F);
5607 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5608 ProfileSummaryInfo *PSI =
5609 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5610 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5611 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5612 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
5613
5614 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5615 BFI, BPI, PSI, Options)) {
5616 // No changes, all analyses are preserved.
5617 LRT.update(&ID, /*Changed=*/false);
5618 return PreservedAnalyses::all();
5619 }
5620
5621 // Mark all the analyses that instcombine updates as preserved.
5622 PreservedAnalyses PA;
5623 LRT.update(&ID, /*Changed=*/true);
5624 PA.preserveSet<CFGAnalyses>();
5625 PA.preserve<LastRunTrackingAnalysis>();
5626 return PA;
5627}
5628
5629 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
5630 AU.setPreservesCFG();
5643}
5644
5645 bool InstructionCombiningPass::runOnFunction(Function &F) {
5646 if (skipFunction(F))
5647 return false;
5648
5649 // Required analyses.
5650 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5651 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5652 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
5653 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5654 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5655 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5656
5657 // Optional analyses.
5658 ProfileSummaryInfo *PSI =
5659 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5660 BlockFrequencyInfo *BFI =
5661 (PSI && PSI->hasProfileSummary()) ?
5662 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5663 nullptr;
5664 BranchProbabilityInfo *BPI = nullptr;
5665 if (auto *WrapperPass =
5666 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5667 BPI = &WrapperPass->getBPI();
5668
5669 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5670 BFI, BPI, PSI, InstCombineOptions());
5671}
5672
5673 char InstructionCombiningPass::ID = 0;
5674
5675 InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
5676 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
5677 }
5678
5679 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
5680 "Combine redundant instructions", false, false)
5692
5693// Initialization Routines
5694 void llvm::initializeInstCombine(PassRegistry &Registry) {
5695 initializeInstructionCombiningPassPass(Registry);
5696 }
5697
5698 FunctionPass *llvm::createInstructionCombiningPass() {
5699 return new InstructionCombiningPass();
5700}
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:190
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file defines the DenseMap class.
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
Hexagon Vector Combine
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
unsigned OpIndex
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition: APInt.cpp:1732
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:423
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition: APInt.cpp:1864
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:910
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1468
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1902
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:827
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1915
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:851
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:256
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
Class to represent array types.
Definition: DerivedTypes.h:395
uint64_t getNumElements() const
Definition: DerivedTypes.h:407
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
Definition: DerivedTypes.h:408
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
Definition: Attributes.cpp:439
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:208
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:517
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
Definition: BasicBlock.cpp:250
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:374
const Instruction & front() const
Definition: BasicBlock.h:471
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:571
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:459
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:467
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
Definition: BasicBlock.cpp:430
size_t size() const
Definition: BasicBlock.h:469
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:370
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition: InstrTypes.h:293
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1420
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1920
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1286
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1417
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:696
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:698
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:787
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
ConstantArray - Constant Array Declarations.
Definition: Constants.h:427
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1312
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:770
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2644
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2631
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2637
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2691
static Constant * getNeg(Constant *C, bool HasNSW=false)
Definition: Constants.cpp:2625
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:873
static ConstantInt * getBool(LLVMContext &Context, bool V)
Definition: Constants.cpp:880
This class represents a range of values.
Definition: ConstantRange.h:47
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
Definition: Constants.h:511
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1421
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
Definition: Constants.cpp:403
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:784
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
const Constant * stripPointerCasts() const
Definition: Constant.h:218
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:435
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
Definition: DataLayout.cpp:971
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
Definition: DataLayout.h:219
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
Definition: DataLayout.cpp:754
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:878
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:369
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:617
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:893
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:87
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
bool empty() const
Definition: DenseMap.h:98
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:317
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:205
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
This class represents a freeze function that returns a random concrete value if an operand is either a poison value or an undef value.
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
const BasicBlock & getEntryBlock() const
Definition: Function.h:809
Represents flags for the getelementptr instruction/expression.
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
GEPNoWrapFlags getNoWrapFlags() const
Definition: Operator.h:430
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:956
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:980
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:113
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1699
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2554
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1043
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2044
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2573
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1986
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:330
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1881
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1873
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
Definition: IRBuilder.h:252
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:879
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:890
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:505
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2403
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2434
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1756
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1386
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition: IRBuilder.h:1797
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2532
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1517
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1369
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2018
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1670
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2224
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:199
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1498
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1561
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2379
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1693
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:535
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition: IRBuilder.h:521
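The IRBuilder methods above are typically chained from a single builder positioned in a block. A minimal sketch (illustrative only, not a fold from this file) that emits "X < 0 ? 0 - X : X" at the end of an existing block BB:
#include "llvm/IR/IRBuilder.h"
// Sketch: build an integer absolute value with the IRBuilder calls above.
static llvm::Value *emitAbs(llvm::BasicBlock *BB, llvm::Value *X) {
  llvm::IRBuilder<> B(BB->getContext());
  B.SetInsertPoint(BB);  // append created instructions to the end of BB
  llvm::Value *Zero = llvm::ConstantInt::get(X->getType(), 0);
  llvm::Value *IsNeg = B.CreateICmp(llvm::CmpInst::ICMP_SLT, X, Zero, "isneg");
  llvm::Value *Neg = B.CreateSub(Zero, X, "neg");
  return B.CreateSelect(IsNeg, Neg, X, "abs");
}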
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion.
Definition: IRBuilder.h:74
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node with a store in the successor.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the CFG.
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
Definition: InstCombiner.h:48
SimplifyQuery SQ
Definition: InstCombiner.h:77
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:337
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:228
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Definition: InstCombiner.h:143
TargetLibraryInfo & TLI
Definition: InstCombiner.h:74
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:368
AAResults * AA
Definition: InstCombiner.h:70
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:388
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
Definition: InstCombiner.h:56
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
Definition: InstCombiner.h:187
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
Definition: InstCombiner.h:420
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
Definition: InstCombiner.h:160
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Definition: InstCombiner.h:65
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombiner.h:377
BranchProbabilityInfo * BPI
Definition: InstCombiner.h:80
ReversePostOrderTraversal< BasicBlock * > & RPOT
Definition: InstCombiner.h:84
const DataLayout & DL
Definition: InstCombiner.h:76
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:455
DomConditionCache DC
Definition: InstCombiner.h:82
const bool MinimizeSize
Definition: InstCombiner.h:68
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:332
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infinite combine loops.
Definition: InstCombiner.h:97
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:412
DominatorTree & DT
Definition: InstCombiner.h:75
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
Definition: InstCombiner.h:280
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
Definition: InstCombiner.h:89
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
Definition: InstCombiner.h:433
BuilderTy & Builder
Definition: InstCombiner.h:61
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:209
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
Definition: InstCombiner.h:358
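As a sketch of how these combiner-aware helpers are meant to be used (a hypothetical fold for illustration, not one performed by this file): replacing "xor X, 0" with X through replaceInstUsesWith keeps the use lists and the worklist consistent.
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
// Hypothetical fold: if I computes "xor X, 0", forward X to all users via the
// combiner-aware RAUW helper so affected instructions are requeued.
static llvm::Instruction *foldXorZero(llvm::InstCombiner &IC,
                                      llvm::BinaryOperator &I) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  if (match(&I, m_Xor(m_Value(X), m_Zero())))
    return IC.replaceInstUsesWith(I, X);
  return nullptr;
}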
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition: InstCombine.h:66
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification passes.
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they might now be simplified as well.
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void zap()
Check that the worklist is empty and nuke the backing store for the map.
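A brief sketch of how the worklist operations above are driven after a simplification (names are illustrative; this is not the pass's driver loop):
#include "llvm/Transforms/Utils/InstructionWorklist.h"
// Sketch: Old was just simplified into New, so its users may simplify further
// and New itself still needs a visit.
static void requeueAfterSimplify(llvm::InstructionWorklist &WL,
                                 llvm::Instruction *Old,
                                 llvm::Instruction *New) {
  WL.pushUsersToWorkList(*Old); // users of Old might now be simplified as well
  WL.add(New);                  // make sure the replacement gets visited too
}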
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:328
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:475
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:68
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1764
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an operator which supports these flags.
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:72
bool isTerminator() const
Definition: Instruction.h:277
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control flow here).
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
Definition: Instruction.h:333
bool isShift() const
Definition: Instruction.h:282
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:472
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
bool isIntDivRem() const
Definition: Instruction.h:280
Class to represent integer types.
Definition: DerivedTypes.h:42
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handling.
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad will have (use 0 if you really have no idea).
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Definition: Instructions.h:176
Metadata node.
Definition: Metadata.h:1069
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1430
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1436
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:891
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition: Metadata.h:62
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR unit.
Definition: PassManager.h:692
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
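For illustration, the PHINode interface above is used like this when merging two values at a join block (a sketch only; the blocks and incoming values are assumed to exist and are named hypothetically):
#include "llvm/IR/Instructions.h"
// Sketch: create a two-input phi at the top of MergeBB that selects between
// VThen and VElse depending on which predecessor was taken.
static llvm::PHINode *mergeValues(llvm::BasicBlock *MergeBB,
                                  llvm::Value *VThen, llvm::BasicBlock *ThenBB,
                                  llvm::Value *VElse, llvm::BasicBlock *ElseBB) {
  llvm::PHINode *PN = llvm::PHINode::Create(VThen->getType(),
                                            /*NumReservedValues=*/2, "merge",
                                            MergeBB->begin());
  PN->addIncoming(VThen, ThenBB);
  PN->addIncoming(VElse, ElseBB);
  return PN;
}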
PassRegistry - This class manages the registration and initialization of the pass subsystem at application startup.
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at application startup.
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior when provided with illegal operands, and return a poison value instead.
Definition: Constants.h:1460
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1878
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like targets or garbage collectors) "just work" when linked with an executable.
Definition: Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition: SmallPtrSet.h:94
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
Definition: SmallSet.h:132
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
iterator erase(const_iterator CI)
Definition: SmallVector.h:737
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
typename SuperClass::iterator iterator
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:51
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Definition: TargetFolder.h:34
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:258
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:288
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
op_iterator op_begin()
Definition: User.h:280
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
Definition: Value.h:740
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:852
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isZero() const
Definition: TypeSize.h:156
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:135
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:731
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:982
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:826
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
Definition: PatternMatch.h:186
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr and ashr).
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:245
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:864
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
Definition: PatternMatch.h:791
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
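The m_* matchers above compose into declarative pattern checks. A small sketch (the pattern itself is arbitrary and only for illustration): recognise "(X << C) | Y" and bind X, Y, and the shift amount C.
#include "llvm/IR/PatternMatch.h"
// Sketch: returns true and binds the captures if V has the required shape.
static bool matchShlOr(llvm::Value *V, llvm::Value *&X, llvm::Value *&Y,
                       const llvm::APInt *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_Or(m_OneUse(m_Shl(m_Value(X), m_APInt(C))), m_Value(Y)));
}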
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ FalseVal
Definition: TGLexer.h:59
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition: DWP.cpp:480
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
Definition: STLExtras.h:854
void stable_sort(R &&Range)
Definition: STLExtras.h:2037
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Don't use information from its non-constant operands.
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructions.
Definition: Local.cpp:2877
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2448
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users.
Definition: Local.cpp:2316
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:162
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
Definition: Utils.cpp:1683
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are required to preserve.
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g. malloc/free), return the identifier for its family of functions.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition: STLExtras.h:657
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
Definition: Local.cpp:2859
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
Definition: bit.h:146
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:406
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition: Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:44
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into an appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1990
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm....
Definition: Local.cpp:1731
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2787
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:336
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
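For context, simplifyBinOp is the query form of the folds this pass performs: it returns an existing equivalent value without creating new IR. A hedged sketch (the caller and the SimplifyQuery are assumed to come from elsewhere):
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/InstrTypes.h"
// Sketch: ask whether I already folds to an existing value; returns null if
// no simplification applies. No new instructions are created either way.
static llvm::Value *trySimplify(llvm::BinaryOperator *I,
                                const llvm::SimplifyQuery &SQ) {
  return llvm::simplifyBinOp(I->getOpcode(), I->getOperand(0),
                             I->getOperand(1), SQ.getWithInstruction(I));
}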
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successor blocks.
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition: STLExtras.h:2067
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:315
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:243
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:43
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:240
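The KnownBits accessors above summarize what bit-level analysis has proven about a value. A minimal sketch using the generic ValueTracking entry point (assuming a DataLayout DL is available; not code from this pass):
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
// Sketch: V is provably zero exactly when every bit is a known leading zero.
static bool isProvablyZero(const llvm::Value *V, const llvm::DataLayout &DL) {
  llvm::KnownBits Known = llvm::computeKnownBits(V, DL);
  return Known.countMinLeadingZeros() == Known.getBitWidth();
}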
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const