1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
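//
// For example (illustrative only), canonicalizations 1 and 6 together turn:
//    %A = mul i32 2, %X
// into:
//    %A = shl i32 %X, 1
// (the constant is first moved to the RHS, then the power-of-two multiply is
// strength-reduced to a shift).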
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APInt.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
41#include "llvm/ADT/Statistic.h"
46#include "llvm/Analysis/CFG.h"
61#include "llvm/IR/BasicBlock.h"
62#include "llvm/IR/CFG.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DIBuilder.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
69#include "llvm/IR/Dominators.h"
71#include "llvm/IR/Function.h"
73#include "llvm/IR/IRBuilder.h"
74#include "llvm/IR/InstrTypes.h"
75#include "llvm/IR/Instruction.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/Metadata.h"
80#include "llvm/IR/Operator.h"
81#include "llvm/IR/PassManager.h"
83#include "llvm/IR/Type.h"
84#include "llvm/IR/Use.h"
85#include "llvm/IR/User.h"
86#include "llvm/IR/Value.h"
87#include "llvm/IR/ValueHandle.h"
92#include "llvm/Support/Debug.h"
100#include <algorithm>
101#include <cassert>
102#include <cstdint>
103#include <memory>
104#include <optional>
105#include <string>
106#include <utility>
107
108#define DEBUG_TYPE "instcombine"
109#include "llvm/Transforms/Utils/InstructionWorklist.h"
110#include <optional>
111
112using namespace llvm;
113using namespace llvm::PatternMatch;
114
115STATISTIC(NumWorklistIterations,
116 "Number of instruction combining iterations performed");
117STATISTIC(NumOneIteration, "Number of functions with one iteration");
118STATISTIC(NumTwoIterations, "Number of functions with two iterations");
119STATISTIC(NumThreeIterations, "Number of functions with three iterations");
120STATISTIC(NumFourOrMoreIterations,
121 "Number of functions with four or more iterations");
122
123STATISTIC(NumCombined , "Number of insts combined");
124STATISTIC(NumConstProp, "Number of constant folds");
125STATISTIC(NumDeadInst , "Number of dead inst eliminated");
126STATISTIC(NumSunkInst , "Number of instructions sunk");
127STATISTIC(NumExpand, "Number of expansions");
128STATISTIC(NumFactor , "Number of factorizations");
129STATISTIC(NumReassoc , "Number of reassociations");
130DEBUG_COUNTER(VisitCounter, "instcombine-visit",
131 "Controls which instructions are visited");
132
133static cl::opt<bool>
134EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
135 cl::init(true));
136
138 "instcombine-max-sink-users", cl::init(32),
139 cl::desc("Maximum number of undroppable users for instruction sinking"));
140
141static cl::opt<unsigned>
142MaxArraySize("instcombine-maxarray-size", cl::init(1024),
143 cl::desc("Maximum array size considered when doing a combine"));
144
145// FIXME: Remove this flag when it is no longer necessary to convert
146// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
147// increases variable availability at the cost of accuracy. Variables that
148// cannot be promoted by mem2reg or SROA will be described as living in memory
149// for their entire lifetime. However, passes like DSE and instcombine can
150// delete stores to the alloca, leading to misleading and inaccurate debug
151// information. This flag can be removed when those passes are fixed.
152static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
153 cl::Hidden, cl::init(true));
154
155std::optional<Instruction *>
156InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
157 // Handle target specific intrinsics
158 if (II.getCalledFunction()->isTargetIntrinsic()) {
159 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
160 }
161 return std::nullopt;
162}
163
164std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
165 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
166 bool &KnownBitsComputed) {
167 // Handle target specific intrinsics
168 if (II.getCalledFunction()->isTargetIntrinsic()) {
169 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
170 *this, II, DemandedMask, Known, KnownBitsComputed);
171 }
172 return std::nullopt;
173}
174
175std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
176 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
177 APInt &PoisonElts2, APInt &PoisonElts3,
178 std::function<void(Instruction *, unsigned, APInt, APInt &)>
179 SimplifyAndSetOp) {
180 // Handle target specific intrinsics
181 if (II.getCalledFunction()->isTargetIntrinsic()) {
182 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
183 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
184 SimplifyAndSetOp);
185 }
186 return std::nullopt;
187}
188
189bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
190 // Approved exception for TTI use: This queries a legality property of the
191 // target, not a profitability heuristic. Ideally this should be part of
192 // DataLayout instead.
193 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
194}
195
196Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
197 if (!RewriteGEP)
199
201 auto *Inst = dyn_cast<Instruction>(GEP);
202 if (Inst)
204
205 Value *Offset = EmitGEPOffset(GEP);
206 // If a non-trivial GEP has other uses, rewrite it to avoid duplicating
207 // the offset arithmetic.
208 if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
209 !GEP->getSourceElementType()->isIntegerTy(8)) {
211 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
212 Offset, "", GEP->getNoWrapFlags()));
214 }
215 return Offset;
216}
217
218/// Legal integers and common types are considered desirable. This is used to
219/// avoid creating instructions with types that may not be supported well by
220/// the backend.
221/// NOTE: This treats i8, i16 and i32 specially because they are common
222/// types in frontend languages.
223bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
224 switch (BitWidth) {
225 case 8:
226 case 16:
227 case 32:
228 return true;
229 default:
230 return DL.isLegalInteger(BitWidth);
231 }
232}
233
234/// Return true if it is desirable to convert an integer computation from a
235/// given bit width to a new bit width.
236/// We don't want to convert from a legal or desirable type (like i8) to an
237/// illegal type or from a smaller to a larger illegal type. A width of '1'
238/// is always treated as a desirable type because i1 is a fundamental type in
239/// IR, and there are many specialized optimizations for i1 types.
240/// Common/desirable widths are equally treated as legal to convert to, in
241/// order to open up more combining opportunities.
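/// For example (illustrative): shrinking an illegal i33 to a desirable i32 is
/// allowed, converting a desirable i16 to an illegal i17 is rejected, and
/// widening one illegal type to a larger illegal type is also rejected.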
242bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
243 unsigned ToWidth) const {
244 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
245 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
246
247 // Convert to desirable widths even if they are not legal types.
248 // Only shrink types, to prevent infinite loops.
249 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
250 return true;
251
252 // If this is a legal or desirable integer from type, and the result would be
253 // an illegal type, don't do the transformation.
254 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
255 return false;
256
257 // Otherwise, if both are illegal, do not increase the size of the result. We
258 // do allow things like i160 -> i64, but not i64 -> i160.
259 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
260 return false;
261
262 return true;
263}
264
265/// Return true if it is desirable to convert a computation from 'From' to 'To'.
266/// We don't want to convert from a legal to an illegal type or from a smaller
267/// to a larger illegal type. i1 is always treated as a legal type because it is
268/// a fundamental type in IR, and there are many specialized optimizations for
269/// i1 types.
270bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
271 // TODO: This could be extended to allow vectors. Datalayout changes might be
272 // needed to properly support that.
273 if (!From->isIntegerTy() || !To->isIntegerTy())
274 return false;
275
276 unsigned FromWidth = From->getPrimitiveSizeInBits();
277 unsigned ToWidth = To->getPrimitiveSizeInBits();
278 return shouldChangeType(FromWidth, ToWidth);
279}
280
281// Return true if No Signed Wrap should be maintained for I.
282// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
283// where both B and C should be ConstantInts, results in a constant that does
284// not overflow. This function only handles the Add/Sub/Mul opcodes. For
285// all other opcodes, the function conservatively returns false.
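// For example (illustrative): when "(X +nsw 1) +nsw 2" is reassociated into
// "X +nsw 3", nsw may be kept because 1 + 2 does not overflow; if combining
// the two constants overflowed, the flag would have to be dropped.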
287 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
288 if (!OBO || !OBO->hasNoSignedWrap())
289 return false;
290
291 const APInt *BVal, *CVal;
292 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
293 return false;
294
295 // We reason about Add/Sub/Mul only.
296 bool Overflow = false;
297 switch (I.getOpcode()) {
298 case Instruction::Add:
299 (void)BVal->sadd_ov(*CVal, Overflow);
300 break;
301 case Instruction::Sub:
302 (void)BVal->ssub_ov(*CVal, Overflow);
303 break;
304 case Instruction::Mul:
305 (void)BVal->smul_ov(*CVal, Overflow);
306 break;
307 default:
308 // Conservatively return false for other opcodes.
309 return false;
310 }
311 return !Overflow;
312}
313
315 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
316 return OBO && OBO->hasNoUnsignedWrap();
317}
318
320 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
321 return OBO && OBO->hasNoSignedWrap();
322}
323
324/// Conservatively clears subclassOptionalData after a reassociation or
325/// commutation. Fast-math flags are preserved when applicable, as they remain
326/// valid across these changes.
328 FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
329 if (!FPMO) {
330 I.clearSubclassOptionalData();
331 return;
332 }
333
334 FastMathFlags FMF = I.getFastMathFlags();
335 I.clearSubclassOptionalData();
336 I.setFastMathFlags(FMF);
337}
338
339/// Combine constant operands of associative operations either before or after a
340/// cast to eliminate one of the associative operations:
341/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
342/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
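/// For example (illustrative, for the zext + bitwise-logic case handled
/// below):
///   and (zext i8 (and i8 %X, 12) to i32), 4 --> and (zext i8 %X to i32), 4
/// because the two masks fold together in the wider type (4 & zext(12) == 4).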
343static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
344 InstCombinerImpl &IC) {
345 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
346 if (!Cast || !Cast->hasOneUse())
347 return false;
348
349 // TODO: Enhance logic for other casts and remove this check.
350 auto CastOpcode = Cast->getOpcode();
351 if (CastOpcode != Instruction::ZExt)
352 return false;
353
354 // TODO: Enhance logic for other BinOps and remove this check.
355 if (!BinOp1->isBitwiseLogicOp())
356 return false;
357
358 auto AssocOpcode = BinOp1->getOpcode();
359 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
360 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
361 return false;
362
363 Constant *C1, *C2;
364 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
365 !match(BinOp2->getOperand(1), m_Constant(C2)))
366 return false;
367
368 // TODO: This assumes a zext cast.
369 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
370 // to the destination type might lose bits.
371
372 // Fold the constants together in the destination type:
373 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
374 const DataLayout &DL = IC.getDataLayout();
375 Type *DestTy = C1->getType();
376 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
377 if (!CastC2)
378 return false;
379 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
380 if (!FoldedC)
381 return false;
382
383 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
384 IC.replaceOperand(*BinOp1, 1, FoldedC);
386 Cast->dropPoisonGeneratingFlags();
387 return true;
388}
389
390// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
391// inttoptr ( ptrtoint (x) ) --> x
392Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
393 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
394 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
395 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
396 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
397 Type *CastTy = IntToPtr->getDestTy();
398 if (PtrToInt &&
399 CastTy->getPointerAddressSpace() ==
400 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
401 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
402 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
403 return PtrToInt->getOperand(0);
404 }
405 return nullptr;
406}
407
408/// This performs a few simplifications for operators that are associative or
409/// commutative:
410///
411/// Commutative operators:
412///
413/// 1. Order operands such that they are listed from right (least complex) to
414/// left (most complex). This puts constants before unary operators before
415/// binary operators.
416///
417/// Associative operators:
418///
419/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
420/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
421///
422/// Associative and commutative operators:
423///
424/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
425/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
426/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
427/// if C1 and C2 are constants.
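/// For example (illustrative), transform 6 turns:
///   (%X + 3) + (%Y + 5) --> (%X + %Y) + 8
/// when both inner adds have no other uses.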
428bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
429 Instruction::BinaryOps Opcode = I.getOpcode();
430 bool Changed = false;
431
432 do {
433 // Order operands such that they are listed from right (least complex) to
434 // left (most complex). This puts constants before unary operators before
435 // binary operators.
436 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
437 getComplexity(I.getOperand(1)))
438 Changed = !I.swapOperands();
439
440 if (I.isCommutative()) {
441 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
442 replaceOperand(I, 0, Pair->first);
443 replaceOperand(I, 1, Pair->second);
444 Changed = true;
445 }
446 }
447
448 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
449 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
450
451 if (I.isAssociative()) {
452 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
453 if (Op0 && Op0->getOpcode() == Opcode) {
454 Value *A = Op0->getOperand(0);
455 Value *B = Op0->getOperand(1);
456 Value *C = I.getOperand(1);
457
458 // Does "B op C" simplify?
459 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
460 // It simplifies to V. Form "A op V".
461 replaceOperand(I, 0, A);
462 replaceOperand(I, 1, V);
463 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
464 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
465
466 // Conservatively clear all optional flags since they may not be
467 // preserved by the reassociation. Reset nsw/nuw based on the above
468 // analysis.
470
471 // Note: this is only valid because SimplifyBinOp doesn't look at
472 // the operands to Op0.
473 if (IsNUW)
474 I.setHasNoUnsignedWrap(true);
475
476 if (IsNSW)
477 I.setHasNoSignedWrap(true);
478
479 Changed = true;
480 ++NumReassoc;
481 continue;
482 }
483 }
484
485 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
486 if (Op1 && Op1->getOpcode() == Opcode) {
487 Value *A = I.getOperand(0);
488 Value *B = Op1->getOperand(0);
489 Value *C = Op1->getOperand(1);
490
491 // Does "A op B" simplify?
492 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
493 // It simplifies to V. Form "V op C".
494 replaceOperand(I, 0, V);
495 replaceOperand(I, 1, C);
496 // Conservatively clear the optional flags, since they may not be
497 // preserved by the reassociation.
499 Changed = true;
500 ++NumReassoc;
501 continue;
502 }
503 }
504 }
505
506 if (I.isAssociative() && I.isCommutative()) {
507 if (simplifyAssocCastAssoc(&I, *this)) {
508 Changed = true;
509 ++NumReassoc;
510 continue;
511 }
512
513 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
514 if (Op0 && Op0->getOpcode() == Opcode) {
515 Value *A = Op0->getOperand(0);
516 Value *B = Op0->getOperand(1);
517 Value *C = I.getOperand(1);
518
519 // Does "C op A" simplify?
520 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
521 // It simplifies to V. Form "V op B".
522 replaceOperand(I, 0, V);
523 replaceOperand(I, 1, B);
524 // Conservatively clear the optional flags, since they may not be
525 // preserved by the reassociation.
527 Changed = true;
528 ++NumReassoc;
529 continue;
530 }
531 }
532
533 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
534 if (Op1 && Op1->getOpcode() == Opcode) {
535 Value *A = I.getOperand(0);
536 Value *B = Op1->getOperand(0);
537 Value *C = Op1->getOperand(1);
538
539 // Does "C op A" simplify?
540 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
541 // It simplifies to V. Form "B op V".
542 replaceOperand(I, 0, B);
543 replaceOperand(I, 1, V);
544 // Conservatively clear the optional flags, since they may not be
545 // preserved by the reassociation.
547 Changed = true;
548 ++NumReassoc;
549 continue;
550 }
551 }
552
553 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
554 // if C1 and C2 are constants.
555 Value *A, *B;
556 Constant *C1, *C2, *CRes;
557 if (Op0 && Op1 &&
558 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
559 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
560 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
561 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
562 bool IsNUW = hasNoUnsignedWrap(I) &&
563 hasNoUnsignedWrap(*Op0) &&
564 hasNoUnsignedWrap(*Op1);
565 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
566 BinaryOperator::CreateNUW(Opcode, A, B) :
567 BinaryOperator::Create(Opcode, A, B);
568
569 if (isa<FPMathOperator>(NewBO)) {
570 FastMathFlags Flags = I.getFastMathFlags() &
571 Op0->getFastMathFlags() &
572 Op1->getFastMathFlags();
573 NewBO->setFastMathFlags(Flags);
574 }
575 InsertNewInstWith(NewBO, I.getIterator());
576 NewBO->takeName(Op1);
577 replaceOperand(I, 0, NewBO);
578 replaceOperand(I, 1, CRes);
579 // Conservatively clear the optional flags, since they may not be
580 // preserved by the reassociation.
582 if (IsNUW)
583 I.setHasNoUnsignedWrap(true);
584
585 Changed = true;
586 continue;
587 }
588 }
589
590 // No further simplifications.
591 return Changed;
592 } while (true);
593}
594
595/// Return whether "X LOp (Y ROp Z)" is always equal to
596/// "(X LOp Y) ROp (X LOp Z)".
599 // X & (Y | Z) <--> (X & Y) | (X & Z)
600 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
601 if (LOp == Instruction::And)
602 return ROp == Instruction::Or || ROp == Instruction::Xor;
603
604 // X | (Y & Z) <--> (X | Y) & (X | Z)
605 if (LOp == Instruction::Or)
606 return ROp == Instruction::And;
607
608 // X * (Y + Z) <--> (X * Y) + (X * Z)
609 // X * (Y - Z) <--> (X * Y) - (X * Z)
610 if (LOp == Instruction::Mul)
611 return ROp == Instruction::Add || ROp == Instruction::Sub;
612
613 return false;
614}
615
616/// Return whether "(X LOp Y) ROp Z" is always equal to
617/// "(X ROp Z) LOp (Y ROp Z)".
621 return leftDistributesOverRight(ROp, LOp);
622
623 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
625
626 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
627 // but this requires knowing that the addition does not overflow and other
628 // such subtleties.
629}
630
631/// This function returns the identity value for the given opcode, which can be used to
632/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
634 if (isa<Constant>(V))
635 return nullptr;
636
637 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
638}
639
640/// This function predicates factorization using distributive laws. By default,
641/// it just returns the 'Op' inputs. But for special-cases like
642/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
643/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
644/// allow more factorization opportunities.
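/// For example (illustrative): for "add (shl %X, 5), (mul %X, 3)" the left
/// operand is reported as the equivalent "mul %X, 32", letting the caller
/// factor the whole expression into "mul %X, 35".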
645static Instruction::BinaryOps
646getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
647 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
648 assert(Op && "Expected a binary operator");
649 LHS = Op->getOperand(0);
650 RHS = Op->getOperand(1);
651 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
652 Constant *C;
653 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
654 // X << C --> X * (1 << C)
656 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
657 assert(RHS && "Constant folding of immediate constants failed");
658 return Instruction::Mul;
659 }
660 // TODO: We can add other conversions e.g. shr => div etc.
661 }
662 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
663 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
665 // lshr nneg C, X --> ashr nneg C, X
666 return Instruction::AShr;
667 }
668 }
669 return Op->getOpcode();
670}
671
672/// This tries to simplify binary operations by factorizing out common terms
673/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
676 Instruction::BinaryOps InnerOpcode, Value *A,
677 Value *B, Value *C, Value *D) {
678 assert(A && B && C && D && "All values must be provided");
679
680 Value *V = nullptr;
681 Value *RetVal = nullptr;
682 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
683 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
684
685 // Does "X op' Y" always equal "Y op' X"?
686 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
687
688 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
689 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
690 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
691 // commutative case, "(A op' B) op (C op' A)"?
692 if (A == C || (InnerCommutative && A == D)) {
693 if (A != C)
694 std::swap(C, D);
695 // Consider forming "A op' (B op D)".
696 // If "B op D" simplifies then it can be formed with no cost.
697 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
698
699 // If "B op D" doesn't simplify then only go on if one of the existing
700 // operations "A op' B" and "C op' D" will be zapped as no longer used.
701 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
702 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
703 if (V)
704 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
705 }
706 }
707
708 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
709 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
710 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
711 // commutative case, "(A op' B) op (B op' D)"?
712 if (B == D || (InnerCommutative && B == C)) {
713 if (B != D)
714 std::swap(C, D);
715 // Consider forming "(A op C) op' B".
716 // If "A op C" simplifies then it can be formed with no cost.
717 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
718
719 // If "A op C" doesn't simplify then only go on if one of the existing
720 // operations "A op' B" and "C op' D" will be zapped as no longer used.
721 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
722 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
723 if (V)
724 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
725 }
726 }
727
728 if (!RetVal)
729 return nullptr;
730
731 ++NumFactor;
732 RetVal->takeName(&I);
733
734 // Try to add no-overflow flags to the final value.
735 if (isa<OverflowingBinaryOperator>(RetVal)) {
736 bool HasNSW = false;
737 bool HasNUW = false;
738 if (isa<OverflowingBinaryOperator>(&I)) {
739 HasNSW = I.hasNoSignedWrap();
740 HasNUW = I.hasNoUnsignedWrap();
741 }
742 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
743 HasNSW &= LOBO->hasNoSignedWrap();
744 HasNUW &= LOBO->hasNoUnsignedWrap();
745 }
746
747 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
748 HasNSW &= ROBO->hasNoSignedWrap();
749 HasNUW &= ROBO->hasNoUnsignedWrap();
750 }
751
752 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
753 // We can propagate 'nsw' if we know that
754 // %Y = mul nsw i16 %X, C
755 // %Z = add nsw i16 %Y, %X
756 // =>
757 // %Z = mul nsw i16 %X, C+1
758 //
759 // iff C+1 isn't INT_MIN
760 const APInt *CInt;
761 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
762 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
763
764 // nuw can be propagated with any constant or nuw value.
765 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
766 }
767 }
768 return RetVal;
769}
770
771// If `I` has one Const operand and the other matches `(ctpop (not x))`,
772// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
773// This is only useful if the new subtract can fold, so we only handle the
774// following cases:
775// 1) (add/sub/disjoint_or C, (ctpop (not x))
776// -> (add/sub/disjoint_or C', (ctpop x))
777// 2) (cmp pred C, (ctpop (not x))
778// -> (cmp pred C', (ctpop x))
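// For example (illustrative, for i8):
//   (add 10, (ctpop (xor %x, -1))) -> (sub 18, (ctpop %x))
// since ctpop of the inverted value equals 8 - ctpop(%x).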
780 unsigned Opc = I->getOpcode();
781 unsigned ConstIdx = 1;
782 switch (Opc) {
783 default:
784 return nullptr;
785 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
786 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
787 // is constant.
788 case Instruction::Sub:
789 ConstIdx = 0;
790 break;
791 case Instruction::ICmp:
792 // Signed predicates aren't correct in some edge cases like for i2 types; as
793 // well, since (ctpop x) is known [0, log2(BitWidth(x))], almost all signed
794 // comparisons against it are simplified to unsigned.
795 if (cast<ICmpInst>(I)->isSigned())
796 return nullptr;
797 break;
798 case Instruction::Or:
799 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
800 return nullptr;
801 [[fallthrough]];
802 case Instruction::Add:
803 break;
804 }
805
806 Value *Op;
807 // Find ctpop.
808 if (!match(I->getOperand(1 - ConstIdx),
809 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
810 return nullptr;
811
812 Constant *C;
813 // Check other operand is ImmConstant.
814 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
815 return nullptr;
816
817 Type *Ty = Op->getType();
818 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
819 // Need extra check for icmp. Note if this check is true, it generally means
820 // the icmp will simplify to true/false.
821 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
822 Constant *Cmp =
824 if (!Cmp || !Cmp->isZeroValue())
825 return nullptr;
826 }
827
828 // Check we can invert `(not x)` for free.
829 bool Consumes = false;
830 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
831 return nullptr;
832 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
833 assert(NotOp != nullptr &&
834 "Desync between isFreeToInvert and getFreelyInverted");
835
836 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
837
838 Value *R = nullptr;
839
840 // Do the transformation here to avoid potentially introducing an infinite
841 // loop.
842 switch (Opc) {
843 case Instruction::Sub:
844 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
845 break;
846 case Instruction::Or:
847 case Instruction::Add:
848 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
849 break;
850 case Instruction::ICmp:
851 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
852 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
853 break;
854 default:
855 llvm_unreachable("Unhandled Opcode");
856 }
857 assert(R != nullptr);
858 return replaceInstUsesWith(*I, R);
859}
860
861// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
862// IFF
863// 1) the logic_shifts match
864// 2) either both binops are binops and one is `and` or
865// BinOp1 is `and`
866// (logic_shift (inv_logic_shift C1, C), C) == C1 or
867//
868// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
869//
870// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
871// IFF
872// 1) the logic_shifts match
873// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
874//
875// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
876//
877// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
878// IFF
879// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
880// 2) Binop2 is `not`
881//
882// -> (arithmetic_shift Binop1((not X), Y), Amt)
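//
// For example (illustrative of the second pattern above):
//   (xor (xor (lshr X, 3), Mask), (lshr Y, 3))
//     -> (xor (lshr (xor X, Y), 3), Mask)
// which drops one of the two shifts.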
883
884Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
885 const DataLayout &DL = I.getDataLayout();
886 auto IsValidBinOpc = [](unsigned Opc) {
887 switch (Opc) {
888 default:
889 return false;
890 case Instruction::And:
891 case Instruction::Or:
892 case Instruction::Xor:
893 case Instruction::Add:
894 // Skip Sub as we only match constant masks which will canonicalize to use
895 // add.
896 return true;
897 }
898 };
899
900 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
901 // constraints.
902 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
903 unsigned ShOpc) {
904 assert(ShOpc != Instruction::AShr);
905 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
906 ShOpc == Instruction::Shl;
907 };
908
909 auto GetInvShift = [](unsigned ShOpc) {
910 assert(ShOpc != Instruction::AShr);
911 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
912 };
913
914 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
915 unsigned ShOpc, Constant *CMask,
916 Constant *CShift) {
917 // If the BinOp1 is `and` we don't need to check the mask.
918 if (BinOpc1 == Instruction::And)
919 return true;
920
921 // For all other possible transfers we need complete distributable
922 // binop/shift (anything but `add` + `lshr`).
923 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
924 return false;
925
926 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
927 // vecs, otherwise the mask will be simplified and the following check will
928 // handle it).
929 if (BinOpc2 == Instruction::And)
930 return true;
931
932 // Otherwise, need mask that meets the below requirement.
933 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
934 Constant *MaskInvShift =
935 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
936 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
937 CMask;
938 };
939
940 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
941 Constant *CMask, *CShift;
942 Value *X, *Y, *ShiftedX, *Mask, *Shift;
943 if (!match(I.getOperand(ShOpnum),
944 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
945 return nullptr;
946 if (!match(I.getOperand(1 - ShOpnum),
949 m_Value(ShiftedX)),
950 m_Value(Mask))))
951 return nullptr;
952 // Make sure we are matching instruction shifts and not ConstantExpr
953 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
954 auto *IX = dyn_cast<Instruction>(ShiftedX);
955 if (!IY || !IX)
956 return nullptr;
957
958 // LHS and RHS need same shift opcode
959 unsigned ShOpc = IY->getOpcode();
960 if (ShOpc != IX->getOpcode())
961 return nullptr;
962
963 // Make sure binop is real instruction and not ConstantExpr
964 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
965 if (!BO2)
966 return nullptr;
967
968 unsigned BinOpc = BO2->getOpcode();
969 // Make sure we have valid binops.
970 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
971 return nullptr;
972
973 if (ShOpc == Instruction::AShr) {
974 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
975 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
976 Value *NotX = Builder.CreateNot(X);
977 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
979 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
980 }
981
982 return nullptr;
983 }
984
985 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
986 // distribute to drop the shift, regardless of the constants.
987 if (BinOpc == I.getOpcode() &&
988 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
989 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
990 Value *NewBinOp1 = Builder.CreateBinOp(
991 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
992 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
993 }
994
995 // Otherwise we can only distribute by constant shifting the mask, so
996 // ensure we have constants.
997 if (!match(Shift, m_ImmConstant(CShift)))
998 return nullptr;
999 if (!match(Mask, m_ImmConstant(CMask)))
1000 return nullptr;
1001
1002 // Check if we can distribute the binops.
1003 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1004 return nullptr;
1005
1006 Constant *NewCMask =
1007 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1008 Value *NewBinOp2 = Builder.CreateBinOp(
1009 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1010 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1011 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1012 NewBinOp1, CShift);
1013 };
1014
1015 if (Instruction *R = MatchBinOp(0))
1016 return R;
1017 return MatchBinOp(1);
1018}
1019
1020// (Binop (zext C), (select C, T, F))
1021// -> (select C, (binop 1, T), (binop 0, F))
1022//
1023// (Binop (sext C), (select C, T, F))
1024// -> (select C, (binop -1, T), (binop 0, F))
1025//
1026// Attempt to simplify binary operations into a select with folded args, when
1027// one operand of the binop is a select instruction and the other operand is a
1028// zext/sext extension, whose value is the select condition.
1031 // TODO: this simplification may be extended to any speculatable instruction,
1032 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1033 Instruction::BinaryOps Opc = I.getOpcode();
1034 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1035 Value *A, *CondVal, *TrueVal, *FalseVal;
1036 Value *CastOp;
1037
1038 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1039 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1040 A->getType()->getScalarSizeInBits() == 1 &&
1041 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1042 m_Value(FalseVal)));
1043 };
1044
1045 // Make sure one side of the binop is a select instruction, and the other is a
1046 // zero/sign extension operating on an i1.
1047 if (MatchSelectAndCast(LHS, RHS))
1048 CastOp = LHS;
1049 else if (MatchSelectAndCast(RHS, LHS))
1050 CastOp = RHS;
1051 else
1052 return nullptr;
1053
1054 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1055 bool IsCastOpRHS = (CastOp == RHS);
1056 bool IsZExt = isa<ZExtInst>(CastOp);
1057 Constant *C;
1058
1059 if (IsTrueArm) {
1060 C = Constant::getNullValue(V->getType());
1061 } else if (IsZExt) {
1062 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1063 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1064 } else {
1065 C = Constant::getAllOnesValue(V->getType());
1066 }
1067
1068 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1069 : Builder.CreateBinOp(Opc, C, V);
1070 };
1071
1072 // If the value used in the zext/sext is the select condition, or the negation
1073 // of the select condition, the binop can be simplified.
1074 if (CondVal == A) {
1075 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1076 return SelectInst::Create(CondVal, NewTrueVal,
1077 NewFoldedConst(true, FalseVal));
1078 }
1079
1080 if (match(A, m_Not(m_Specific(CondVal)))) {
1081 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1082 return SelectInst::Create(CondVal, NewTrueVal,
1083 NewFoldedConst(false, FalseVal));
1084 }
1085
1086 return nullptr;
1087}
1088
1089Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
1090 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1091 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1092 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1093 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1094 Value *A, *B, *C, *D;
1095 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1096
1097 if (Op0)
1098 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1099 if (Op1)
1100 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1101
1102 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1103 // a common term.
1104 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1105 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1106 return V;
1107
1108 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1109 // term.
1110 if (Op0)
1111 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1112 if (Value *V =
1113 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1114 return V;
1115
1116 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1117 // term.
1118 if (Op1)
1119 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1120 if (Value *V =
1121 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1122 return V;
1123
1124 return nullptr;
1125}
1126
1127/// This tries to simplify binary operations which some other binary operation
1128/// distributes over either by factorizing out common terms
1129/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1130/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1131/// Returns the simplified value, or null if it didn't simplify.
1132Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
1133 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1134 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1135 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1136 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1137
1138 // Factorization.
1139 if (Value *R = tryFactorizationFolds(I))
1140 return R;
1141
1142 // Expansion.
1143 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1144 // The instruction has the form "(A op' B) op C". See if expanding it out
1145 // to "(A op C) op' (B op C)" results in simplifications.
1146 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1147 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1148
1149 // Disable the use of undef because it's not safe to distribute undef.
1150 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1151 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1152 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1153
1154 // Do "A op C" and "B op C" both simplify?
1155 if (L && R) {
1156 // They do! Return "L op' R".
1157 ++NumExpand;
1158 C = Builder.CreateBinOp(InnerOpcode, L, R);
1159 C->takeName(&I);
1160 return C;
1161 }
1162
1163 // Does "A op C" simplify to the identity value for the inner opcode?
1164 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1165 // They do! Return "B op C".
1166 ++NumExpand;
1167 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1168 C->takeName(&I);
1169 return C;
1170 }
1171
1172 // Does "B op C" simplify to the identity value for the inner opcode?
1173 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1174 // They do! Return "A op C".
1175 ++NumExpand;
1176 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1177 C->takeName(&I);
1178 return C;
1179 }
1180 }
1181
1182 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1183 // The instruction has the form "A op (B op' C)". See if expanding it out
1184 // to "(A op B) op' (A op C)" results in simplifications.
1185 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1186 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1187
1188 // Disable the use of undef because it's not safe to distribute undef.
1189 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1190 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1191 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1192
1193 // Do "A op B" and "A op C" both simplify?
1194 if (L && R) {
1195 // They do! Return "L op' R".
1196 ++NumExpand;
1197 A = Builder.CreateBinOp(InnerOpcode, L, R);
1198 A->takeName(&I);
1199 return A;
1200 }
1201
1202 // Does "A op B" simplify to the identity value for the inner opcode?
1203 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1204 // They do! Return "A op C".
1205 ++NumExpand;
1206 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1207 A->takeName(&I);
1208 return A;
1209 }
1210
1211 // Does "A op C" simplify to the identity value for the inner opcode?
1212 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1213 // They do! Return "A op B".
1214 ++NumExpand;
1215 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1216 A->takeName(&I);
1217 return A;
1218 }
1219 }
1220
1221 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1222}
1223
1224static std::optional<std::pair<Value *, Value *>>
1225matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
1226 if (LHS->getParent() != RHS->getParent())
1227 return std::nullopt;
1228
1229 if (LHS->getNumIncomingValues() < 2)
1230 return std::nullopt;
1231
1232 if (!equal(LHS->blocks(), RHS->blocks()))
1233 return std::nullopt;
1234
1235 Value *L0 = LHS->getIncomingValue(0);
1236 Value *R0 = RHS->getIncomingValue(0);
1237
1238 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1239 Value *L1 = LHS->getIncomingValue(I);
1240 Value *R1 = RHS->getIncomingValue(I);
1241
1242 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1243 continue;
1244
1245 return std::nullopt;
1246 }
1247
1248 return std::optional(std::pair(L0, R0));
1249}
1250
1251std::optional<std::pair<Value *, Value *>>
1252InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1253 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1254 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1255 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1256 return std::nullopt;
1257 switch (LHSInst->getOpcode()) {
1258 case Instruction::PHI:
1259 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1260 case Instruction::Select: {
1261 Value *Cond = LHSInst->getOperand(0);
1262 Value *TrueVal = LHSInst->getOperand(1);
1263 Value *FalseVal = LHSInst->getOperand(2);
1264 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1265 FalseVal == RHSInst->getOperand(1))
1266 return std::pair(TrueVal, FalseVal);
1267 return std::nullopt;
1268 }
1269 case Instruction::Call: {
1270 // Match min(a, b) and max(a, b)
1271 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1272 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1273 if (LHSMinMax && RHSMinMax &&
1274 LHSMinMax->getPredicate() ==
1276 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1277 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1278 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1279 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1280 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1281 return std::nullopt;
1282 }
1283 default:
1284 return std::nullopt;
1285 }
1286}
1287
1288Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1289 Value *LHS,
1290 Value *RHS) {
1291 Value *A, *B, *C, *D, *E, *F;
1292 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1293 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1294 if (!LHSIsSelect && !RHSIsSelect)
1295 return nullptr;
1296
1297 FastMathFlags FMF;
1299 if (isa<FPMathOperator>(&I)) {
1300 FMF = I.getFastMathFlags();
1302 }
1303
1304 Instruction::BinaryOps Opcode = I.getOpcode();
1306
1307 Value *Cond, *True = nullptr, *False = nullptr;
1308
1309 // Special-case for add/negate combination. Replace the zero in the negation
1310 // with the trailing add operand:
1311 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1312 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1313 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1314 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1315 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1316 return nullptr;
1317
1318 Value *N;
1319 if (True && match(FVal, m_Neg(m_Value(N)))) {
1320 Value *Sub = Builder.CreateSub(Z, N);
1321 return Builder.CreateSelect(Cond, True, Sub, I.getName());
1322 }
1323 if (False && match(TVal, m_Neg(m_Value(N)))) {
1324 Value *Sub = Builder.CreateSub(Z, N);
1325 return Builder.CreateSelect(Cond, Sub, False, I.getName());
1326 }
1327 return nullptr;
1328 };
1329
1330 if (LHSIsSelect && RHSIsSelect && A == D) {
1331 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1332 Cond = A;
1333 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1334 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1335
1336 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1337 if (False && !True)
1338 True = Builder.CreateBinOp(Opcode, B, E);
1339 else if (True && !False)
1340 False = Builder.CreateBinOp(Opcode, C, F);
1341 }
1342 } else if (LHSIsSelect && LHS->hasOneUse()) {
1343 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1344 Cond = A;
1345 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1346 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1347 if (Value *NewSel = foldAddNegate(B, C, RHS))
1348 return NewSel;
1349 } else if (RHSIsSelect && RHS->hasOneUse()) {
1350 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1351 Cond = D;
1352 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1353 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1354 if (Value *NewSel = foldAddNegate(E, F, LHS))
1355 return NewSel;
1356 }
1357
1358 if (!True || !False)
1359 return nullptr;
1360
1361 Value *SI = Builder.CreateSelect(Cond, True, False);
1362 SI->takeName(&I);
1363 return SI;
1364}
1365
1366/// Freely adapt every user of V as-if V was changed to !V.
1367/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
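/// For example (illustrative): if V is only used by "select i1 V, %a, %b" and
/// "br i1 V, ...", inverting V is free because the select's arms and the
/// branch's successors can simply be swapped.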
1369 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1370 for (User *U : make_early_inc_range(I->users())) {
1371 if (U == IgnoredUser)
1372 continue; // Don't consider this user.
1373 switch (cast<Instruction>(U)->getOpcode()) {
1374 case Instruction::Select: {
1375 auto *SI = cast<SelectInst>(U);
1376 SI->swapValues();
1377 SI->swapProfMetadata();
1378 break;
1379 }
1380 case Instruction::Br: {
1381 BranchInst *BI = cast<BranchInst>(U);
1382 BI->swapSuccessors(); // swaps prof metadata too
1383 if (BPI)
1385 break;
1386 }
1387 case Instruction::Xor:
1388 replaceInstUsesWith(cast<Instruction>(*U), I);
1389 // Add to worklist for DCE.
1390 addToWorklist(cast<Instruction>(U));
1391 break;
1392 default:
1393 llvm_unreachable("Got unexpected user - out of sync with "
1394 "canFreelyInvertAllUsersOf() ?");
1395 }
1396 }
1397}
1398
1399/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1400/// constant zero (which is the 'negate' form).
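/// For example (illustrative): for "%n = sub i32 0, %x" this returns %x, and
/// for a constant like "i32 5" it returns "i32 -5".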
1401Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1402 Value *NegV;
1403 if (match(V, m_Neg(m_Value(NegV))))
1404 return NegV;
1405
1406 // Constants can be considered to be negated values if they can be folded.
1407 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1408 return ConstantExpr::getNeg(C);
1409
1410 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1411 if (C->getType()->getElementType()->isIntegerTy())
1412 return ConstantExpr::getNeg(C);
1413
1414 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1415 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1416 Constant *Elt = CV->getAggregateElement(i);
1417 if (!Elt)
1418 return nullptr;
1419
1420 if (isa<UndefValue>(Elt))
1421 continue;
1422
1423 if (!isa<ConstantInt>(Elt))
1424 return nullptr;
1425 }
1426 return ConstantExpr::getNeg(CV);
1427 }
1428
1429 // Negate integer vector splats.
1430 if (auto *CV = dyn_cast<Constant>(V))
1431 if (CV->getType()->isVectorTy() &&
1432 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1433 return ConstantExpr::getNeg(CV);
1434
1435 return nullptr;
1436}
1437
1438// Try to fold:
1439// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1440// -> ({s|u}itofp (int_binop x, y))
1441// 2) (fp_binop ({s|u}itofp x), FpC)
1442// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1443//
1444// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1445Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1446 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1448
1449 Type *FPTy = BO.getType();
1450 Type *IntTy = IntOps[0]->getType();
1451
1452 unsigned IntSz = IntTy->getScalarSizeInBits();
1453 // This is the maximum number of in-use bits by the integer where the int -> fp
1454 // casts are exact.
1455 unsigned MaxRepresentableBits =
1457
1458 // Preserve known number of leading bits. This can allow us to do trivial
1459 // nsw/nuw checks later on.
1460 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1461
1462 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1463 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1464 auto IsNonZero = [&](unsigned OpNo) -> bool {
1465 if (OpsKnown[OpNo].hasKnownBits() &&
1466 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1467 return true;
1468 return isKnownNonZero(IntOps[OpNo], SQ);
1469 };
1470
1471 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1472 // NB: This matches the impl in ValueTracking, we just try to use cached
1473 // knownbits here. If we ever start supporting WithCache for
1474 // `isKnownNonNegative`, change this to an explicit call.
1475 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1476 };
1477
1478 // Check if we know for certain that ({s|u}itofp op) is exact.
1479 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1480 // Can we treat this operand as the desired sign?
1481 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1482 !IsNonNeg(OpNo))
1483 return false;
1484
1485 // If fp precision >= bitwidth(op) then it's exact.
1486 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1487 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1488 // handled specially. We can't, however, increase the bound arbitrarily for
1489 // `sitofp` as for larger sizes, it won't sign extend.
1490 if (MaxRepresentableBits < IntSz) {
1491 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1492 // numSignBits(op).
1493 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1494 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1495 if (OpsFromSigned)
1496 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1497 // Finally, for unsigned casts, check that fp precision >= bitwidth(op) -
1498 // numLeadingZeros(op).
1499 else {
1500 NumUsedLeadingBits[OpNo] =
1501 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1502 }
1503 }
1504 // NB: We could also check if op is known to be a power of 2 or zero (which
1505 // will always be representable). It's unlikely, however, that if we are
1506 // unable to bound op in any way, we will be able to pass the overflow checks
1507 // later on.
1508
1509 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1510 return false;
1511 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1512 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1513 IsNonZero(OpNo);
1514 };
1515
1516 // If we have a constant rhs, see if we can losslessly convert it to an int.
1517 if (Op1FpC != nullptr) {
1518 // Signed + Mul requires non-zero
1519 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1520 !match(Op1FpC, m_NonZeroFP()))
1521 return nullptr;
1522
1524 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1525 IntTy, DL);
1526 if (Op1IntC == nullptr)
1527 return nullptr;
1528 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1529 : Instruction::UIToFP,
1530 Op1IntC, FPTy, DL) != Op1FpC)
1531 return nullptr;
1532
1533 // First try to keep sign of cast the same.
1534 IntOps[1] = Op1IntC;
1535 }
1536
1537 // Ensure lhs/rhs integer types match.
1538 if (IntTy != IntOps[1]->getType())
1539 return nullptr;
1540
1541 if (Op1FpC == nullptr) {
1542 if (!IsValidPromotion(1))
1543 return nullptr;
1544 }
1545 if (!IsValidPromotion(0))
1546 return nullptr;
1547
1548 // Finally we check if the integer version of the binop will not overflow.
1549 Instruction::BinaryOps IntOpc;
1550 // Because of the precision check, we can often rule out overflows.
1551 bool NeedsOverflowCheck = true;
1552 // Try to conservatively rule out overflow based on the already done precision
1553 // checks.
1554 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1555 unsigned OverflowMaxCurBits =
1556 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1557 bool OutputSigned = OpsFromSigned;
1558 switch (BO.getOpcode()) {
1559 case Instruction::FAdd:
1560 IntOpc = Instruction::Add;
1561 OverflowMaxOutputBits += OverflowMaxCurBits;
1562 break;
1563 case Instruction::FSub:
1564 IntOpc = Instruction::Sub;
1565 OverflowMaxOutputBits += OverflowMaxCurBits;
1566 break;
1567 case Instruction::FMul:
1568 IntOpc = Instruction::Mul;
1569 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1570 break;
1571 default:
1572 llvm_unreachable("Unsupported binop");
1573 }
1574 // The precision check may have already ruled out overflow.
1575 if (OverflowMaxOutputBits < IntSz) {
1576 NeedsOverflowCheck = false;
1577 // We can bound unsigned overflow from sub to an in-range signed value (this is
1578 // what allows us to avoid the overflow check for sub).
1579 if (IntOpc == Instruction::Sub)
1580 OutputSigned = true;
1581 }
1582
1583 // The precision check did not rule out overflow, so we need to check.
1584 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1585 // `IntOps[...]` arguments to `KnownOps[...]`.
1586 if (NeedsOverflowCheck &&
1587 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1588 return nullptr;
1589
1590 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1591 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1592 IntBO->setHasNoSignedWrap(OutputSigned);
1593 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1594 }
1595 if (OutputSigned)
1596 return new SIToFPInst(IntBinOp, FPTy);
1597 return new UIToFPInst(IntBinOp, FPTy);
1598}
1599
1600// Try to fold:
1601// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1602// -> ({s|u}itofp (int_binop x, y))
1603// 2) (fp_binop ({s|u}itofp x), FpC)
1604// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1605Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1606 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1607 Constant *Op1FpC = nullptr;
1608 // Check for:
1609 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1610 // 2) (binop ({s|u}itofp x), FpC)
1611 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1612 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1613 return nullptr;
1614
1615 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1616 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1617 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1618 return nullptr;
1619
1620 // Cache KnownBits a bit to potentially save some analysis.
1621 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1622
1623 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1624 // different constraints depending on the sign of the cast.
1625 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1626 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1627 IntOps, Op1FpC, OpsKnown))
1628 return R;
1629 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1630 Op1FpC, OpsKnown);
1631}
1632
1633/// A binop with a constant operand and a sign-extended boolean operand may be
1634/// converted into a select of constants by applying the binary operation to
1635/// the constant with the two possible values of the extended boolean (0 or -1).
1636Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1637 // TODO: Handle non-commutative binop (constant is operand 0).
1638 // TODO: Handle zext.
1639 // TODO: Peek through 'not' of cast.
1640 Value *BO0 = BO.getOperand(0);
1641 Value *BO1 = BO.getOperand(1);
1642 Value *X;
1643 Constant *C;
1644 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1645 !X->getType()->isIntOrIntVectorTy(1))
1646 return nullptr;
1647
1648 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1649 Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
1650 Constant *Zero = ConstantInt::getNullValue(BO.getType());
1651 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1652 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1653 return SelectInst::Create(X, TVal, FVal);
1654}
1655
1656 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1657 bool IsTrueArm) {
1658 SmallVector<Value *> Ops;
1659 for (Value *Op : I.operands()) {
1660 Value *V = nullptr;
1661 if (Op == SI) {
1662 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1663 } else if (match(SI->getCondition(),
1666 m_Specific(Op), m_Value(V))) &&
1668 // Pass
1669 } else {
1670 V = Op;
1671 }
1672 Ops.push_back(V);
1673 }
1674
1675 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1676}
1677
1678 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1679 Value *NewOp, InstCombiner &IC) {
1680 Instruction *Clone = I.clone();
1681 Clone->replaceUsesOfWith(SI, NewOp);
1683 IC.InsertNewInstBefore(Clone, I.getIterator());
1684 return Clone;
1685}
1686
1687 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1688 bool FoldWithMultiUse) {
1689 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1690 if (!SI->hasOneUse() && !FoldWithMultiUse)
1691 return nullptr;
1692
1693 Value *TV = SI->getTrueValue();
1694 Value *FV = SI->getFalseValue();
1695
1696 // Bool selects with constant operands can be folded to logical ops.
1697 if (SI->getType()->isIntOrIntVectorTy(1))
1698 return nullptr;
1699
1700 // Test if a FCmpInst instruction is used exclusively by a select as
1701 // part of a minimum or maximum operation. If so, refrain from doing
1702 // any other folding. This helps out other analyses which understand
1703 // non-obfuscated minimum and maximum idioms. And in this case, at
1704 // least one of the comparison operands has at least one user besides
1705 // the compare (the select), which would often largely negate the
1706 // benefit of folding anyway.
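// For instance, a float-min idiom of this shape is deliberately left alone:
//   %c = fcmp olt float %a, %b
//   %m = select i1 %c, float %a, float %b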
1707 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1708 if (CI->hasOneUse()) {
1709 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1710 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1711 return nullptr;
1712 }
1713 }
1714
1715 // Make sure that one of the select arms folds successfully.
1716 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1717 Value *NewFV =
1718 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1719 if (!NewTV && !NewFV)
1720 return nullptr;
1721
1722 // Create an instruction for the arm that did not fold.
1723 if (!NewTV)
1724 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1725 if (!NewFV)
1726 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1727 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1728}
1729
1730 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1731 Value *InValue, BasicBlock *InBB,
1732 const DataLayout &DL,
1733 const SimplifyQuery SQ) {
1734 // NB: It is a precondition of this transform that the operands be
1735 // phi translatable!
1736 SmallVector<Value *> Ops;
1737 for (Value *Op : I.operands()) {
1738 if (Op == PN)
1739 Ops.push_back(InValue);
1740 else
1741 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1742 }
1743
1744 // Don't consider the simplification successful if we get back a constant
1745 // expression. That's just an instruction in hiding.
1746 // Also reject the case where we simplify back to the phi node. We wouldn't
1747 // be able to remove it in that case.
1748 Value *NewVal = simplifyInstructionWithOperands(
1749 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1750 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1751 return NewVal;
1752
1753 // Check if incoming PHI value can be replaced with constant
1754 // based on implied condition.
1755 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1756 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1757 if (TerminatorBI && TerminatorBI->isConditional() &&
1758 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1759 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1760 std::optional<bool> ImpliedCond = isImpliedCondition(
1761 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1762 DL, LHSIsTrue);
1763 if (ImpliedCond)
1764 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1765 }
1766
1767 return nullptr;
1768}
1769
1770 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1771 bool AllowMultipleUses) {
1772 unsigned NumPHIValues = PN->getNumIncomingValues();
1773 if (NumPHIValues == 0)
1774 return nullptr;
1775
1776 // We normally only transform phis with a single use. However, if a PHI has
1777 // multiple uses and they are all the same operation, we can fold *all* of the
1778 // uses into the PHI.
1779 bool OneUse = PN->hasOneUse();
1780 bool IdenticalUsers = false;
1781 if (!AllowMultipleUses && !OneUse) {
1782 // Walk the use list for the instruction, comparing them to I.
1783 for (User *U : PN->users()) {
1784 Instruction *UI = cast<Instruction>(U);
1785 if (UI != &I && !I.isIdenticalTo(UI))
1786 return nullptr;
1787 }
1788 // Otherwise, we can replace *all* users with the new PHI we form.
1789 IdenticalUsers = true;
1790 }
1791
1792 // Check that all operands are phi-translatable.
1793 for (Value *Op : I.operands()) {
1794 if (Op == PN)
1795 continue;
1796
1797 // Non-instructions never require phi-translation.
1798 auto *I = dyn_cast<Instruction>(Op);
1799 if (!I)
1800 continue;
1801
1802 // Phi-translate can handle phi nodes in the same block.
1803 if (isa<PHINode>(I))
1804 if (I->getParent() == PN->getParent())
1805 continue;
1806
1807 // Operand dominates the block, no phi-translation necessary.
1808 if (DT.dominates(I, PN->getParent()))
1809 continue;
1810
1811 // Not phi-translatable, bail out.
1812 return nullptr;
1813 }
1814
1815 // Check to see whether the instruction can be folded into each phi operand.
1816 // If there is one operand that does not fold, remember the BB it is in.
1817 SmallVector<Value *> NewPhiValues;
1818 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1819 bool SeenNonSimplifiedInVal = false;
1820 for (unsigned i = 0; i != NumPHIValues; ++i) {
1821 Value *InVal = PN->getIncomingValue(i);
1822 BasicBlock *InBB = PN->getIncomingBlock(i);
1823
1824 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1825 NewPhiValues.push_back(NewVal);
1826 continue;
1827 }
1828
1829 // Handle some cases that can't be fully simplified, but where we know that
1830 // the two instructions will fold into one.
1831 auto WillFold = [&]() {
1832 if (!InVal->hasOneUser())
1833 return false;
1834
1835 // icmp of ucmp/scmp with constant will fold to icmp.
1836 const APInt *Ignored;
1837 if (isa<CmpIntrinsic>(InVal) &&
1838 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1839 return true;
1840
1841 // icmp eq zext(bool), 0 will fold to !bool.
1842 if (isa<ZExtInst>(InVal) &&
1843 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1844 match(&I,
1846 return true;
1847
1848 return false;
1849 };
1850
1851 if (WillFold()) {
1852 OpsToMoveUseToIncomingBB.push_back(i);
1853 NewPhiValues.push_back(nullptr);
1854 continue;
1855 }
1856
1857 if (!OneUse && !IdenticalUsers)
1858 return nullptr;
1859
1860 if (SeenNonSimplifiedInVal)
1861 return nullptr; // More than one non-simplified value.
1862 SeenNonSimplifiedInVal = true;
1863
1864 // If there is exactly one non-simplified value, we can insert a copy of the
1865 // operation in that block. However, if this is a critical edge, we would
1866 // be inserting the computation on some other paths (e.g. inside a loop).
1867 // Only do this if the pred block is unconditionally branching into the phi
1868 // block. Also, make sure that the pred block is not dead code.
1869 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1870 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1871 return nullptr;
1872
1873 NewPhiValues.push_back(nullptr);
1874 OpsToMoveUseToIncomingBB.push_back(i);
1875
1876 // If the InVal is an invoke at the end of the pred block, then we can't
1877 // insert a computation after it without breaking the edge.
1878 if (isa<InvokeInst>(InVal))
1879 if (cast<Instruction>(InVal)->getParent() == InBB)
1880 return nullptr;
1881
1882 // Do not push the operation across a loop backedge. This could result in
1883 // an infinite combine loop, and is generally non-profitable (especially
1884 // if the operation was originally outside the loop).
1885 if (isBackEdge(InBB, PN->getParent()))
1886 return nullptr;
1887 }
1888
1889 // Clone the instruction that uses the phi node and move it into the incoming
1890 // BB because we know that the next iteration of InstCombine will simplify it.
1891 SmallDenseMap<BasicBlock *, Instruction *> Clones;
1892 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1893 Value *Op = PN->getIncomingValue(OpIndex);
1894 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1895
1896 Instruction *Clone = Clones.lookup(OpBB);
1897 if (!Clone) {
1898 Clone = I.clone();
1899 for (Use &U : Clone->operands()) {
1900 if (U == PN)
1901 U = Op;
1902 else
1903 U = U->DoPHITranslation(PN->getParent(), OpBB);
1904 }
1905 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1906 Clones.insert({OpBB, Clone});
1907 }
1908
1909 NewPhiValues[OpIndex] = Clone;
1910 }
1911
1912 // Okay, we can do the transformation: create the new PHI node.
1913 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1914 InsertNewInstBefore(NewPN, PN->getIterator());
1915 NewPN->takeName(PN);
1916 NewPN->setDebugLoc(PN->getDebugLoc());
1917
1918 for (unsigned i = 0; i != NumPHIValues; ++i)
1919 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1920
1921 if (IdenticalUsers) {
1922 for (User *U : make_early_inc_range(PN->users())) {
1923 Instruction *User = cast<Instruction>(U);
1924 if (User == &I)
1925 continue;
1926 replaceInstUsesWith(*User, NewPN);
1928 }
1929 OneUse = true;
1930 }
1931
1932 if (OneUse) {
1933 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1934 const_cast<PHINode &>(*NewPN),
1935 const_cast<PHINode &>(*PN), DT);
1936 }
1937 return replaceInstUsesWith(I, NewPN);
1938}
1939
1940 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1941 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1942 // we are guarding against replicating the binop in >1 predecessor.
1943 // This could miss matching a phi with 2 constant incoming values.
1944 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1945 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1946 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1947 Phi0->getNumOperands() != Phi1->getNumOperands())
1948 return nullptr;
1949
1950 // TODO: Remove the restriction for binop being in the same block as the phis.
1951 if (BO.getParent() != Phi0->getParent() ||
1952 BO.getParent() != Phi1->getParent())
1953 return nullptr;
1954
1955 // Fold if there is at least one specific constant value in phi0's or phi1's
1956 // incoming values that comes from the same block and that constant can be
1957 // used to simplify the binary operator (it is the operator's identity value).
1958 // For example:
1959 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1960 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1961 // %add = add i32 %phi0, %phi1
1962 // ==>
1963 // %add = phi i32 [%j, %bb0], [%i, %bb1]
1964 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
1965 /*AllowRHSConstant*/ false);
1966 if (C) {
1967 SmallVector<Value *, 4> NewIncomingValues;
1968 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1969 auto &Phi0Use = std::get<0>(T);
1970 auto &Phi1Use = std::get<1>(T);
1971 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1972 return false;
1973 Value *Phi0UseV = Phi0Use.get();
1974 Value *Phi1UseV = Phi1Use.get();
1975 if (Phi0UseV == C)
1976 NewIncomingValues.push_back(Phi1UseV);
1977 else if (Phi1UseV == C)
1978 NewIncomingValues.push_back(Phi0UseV);
1979 else
1980 return false;
1981 return true;
1982 };
1983
1984 if (all_of(zip(Phi0->operands(), Phi1->operands()),
1985 CanFoldIncomingValuePair)) {
1986 PHINode *NewPhi =
1987 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1988 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1989 "The number of collected incoming values should equal the number "
1990 "of the original PHINode operands!");
1991 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1992 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1993 return NewPhi;
1994 }
1995 }
1996
1997 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1998 return nullptr;
1999
2000 // Match a pair of incoming constants for one of the predecessor blocks.
2001 BasicBlock *ConstBB, *OtherBB;
2002 Constant *C0, *C1;
2003 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2004 ConstBB = Phi0->getIncomingBlock(0);
2005 OtherBB = Phi0->getIncomingBlock(1);
2006 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2007 ConstBB = Phi0->getIncomingBlock(1);
2008 OtherBB = Phi0->getIncomingBlock(0);
2009 } else {
2010 return nullptr;
2011 }
2012 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2013 return nullptr;
2014
2015 // The block that we are hoisting to must reach here unconditionally.
2016 // Otherwise, we could be speculatively executing an expensive or
2017 // non-speculatable op.
2018 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2019 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2020 !DT.isReachableFromEntry(OtherBB))
2021 return nullptr;
2022
2023 // TODO: This check could be tightened to only apply to binops (div/rem) that
2024 // are not safe to speculatively execute. But that could allow hoisting
2025 // potentially expensive instructions (fdiv for example).
2026 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2027 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2028 return nullptr;
2029
2030 // Fold constants for the predecessor block with constant incoming values.
2031 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2032 if (!NewC)
2033 return nullptr;
2034
2035 // Make a new binop in the predecessor block with the non-constant incoming
2036 // values.
2037 Builder.SetInsertPoint(PredBlockBranch);
2038 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2039 Phi0->getIncomingValueForBlock(OtherBB),
2040 Phi1->getIncomingValueForBlock(OtherBB));
2041 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2042 NotFoldedNewBO->copyIRFlags(&BO);
2043
2044 // Replace the binop with a phi of the new values. The old phis are dead.
2045 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2046 NewPhi->addIncoming(NewBO, OtherBB);
2047 NewPhi->addIncoming(NewC, ConstBB);
2048 return NewPhi;
2049}
2050
2051 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2052 if (!isa<Constant>(I.getOperand(1)))
2053 return nullptr;
2054
2055 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2056 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2057 return NewSel;
2058 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2059 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2060 return NewPhi;
2061 }
2062 return nullptr;
2063}
2064
2065 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2066 // If this GEP has only 0 indices, it is the same pointer as
2067 // Src. If Src is not a trivial GEP too, don't combine
2068 // the indices.
2069 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2070 !Src.hasOneUse())
2071 return false;
2072 return true;
2073}
2074
2075 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2076 if (!isa<VectorType>(Inst.getType()))
2077 return nullptr;
2078
2079 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2080 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2081 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2082 cast<VectorType>(Inst.getType())->getElementCount());
2083 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2084 cast<VectorType>(Inst.getType())->getElementCount());
2085
2086 // If both operands of the binop are vector concatenations, then perform the
2087 // narrow binop on each pair of the source operands followed by concatenation
2088 // of the results.
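// A hypothetical instance (assuming %l and %r have no other uses):
//   %l = shufflevector <2 x i32> %l0, <2 x i32> %l1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %r = shufflevector <2 x i32> %r0, <2 x i32> %r1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %b = add <4 x i32> %l, %r
// ==>
//   %b0 = add <2 x i32> %l0, %r0
//   %b1 = add <2 x i32> %l1, %r1
//   %b  = shufflevector <2 x i32> %b0, <2 x i32> %b1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>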
2089 Value *L0, *L1, *R0, *R1;
2090 ArrayRef<int> Mask;
2091 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2092 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2093 LHS->hasOneUse() && RHS->hasOneUse() &&
2094 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2095 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2096 // This transform does not have the speculative execution constraint as
2097 // below because the shuffle is a concatenation. The new binops are
2098 // operating on exactly the same elements as the existing binop.
2099 // TODO: We could ease the mask requirement to allow different undef lanes,
2100 // but that requires an analysis of the binop-with-undef output value.
2101 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2102 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2103 BO->copyIRFlags(&Inst);
2104 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2105 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2106 BO->copyIRFlags(&Inst);
2107 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2108 }
2109
2110 auto createBinOpReverse = [&](Value *X, Value *Y) {
2111 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2112 if (auto *BO = dyn_cast<BinaryOperator>(V))
2113 BO->copyIRFlags(&Inst);
2114 Module *M = Inst.getModule();
2116 M, Intrinsic::vector_reverse, V->getType());
2117 return CallInst::Create(F, V);
2118 };
2119
2120 // NOTE: Reverse shuffles don't require the speculative execution protection
2121 // below because they don't affect which lanes take part in the computation.
2122
2123 Value *V1, *V2;
2124 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2125 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2126 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2127 (LHS->hasOneUse() || RHS->hasOneUse() ||
2128 (LHS == RHS && LHS->hasNUses(2))))
2129 return createBinOpReverse(V1, V2);
2130
2131 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2132 if (LHS->hasOneUse() && isSplatValue(RHS))
2133 return createBinOpReverse(V1, RHS);
2134 }
2135 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2136 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2137 return createBinOpReverse(LHS, V2);
2138
2139 // It may not be safe to reorder shuffles and things like div, urem, etc.
2140 // because we may trap when executing those ops on unknown vector elements.
2141 // See PR20059.
2142 if (!isSafeToSpeculativelyExecute(&Inst))
2143 return nullptr;
2144
2145 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2146 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2147 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2148 BO->copyIRFlags(&Inst);
2149 return new ShuffleVectorInst(XY, M);
2150 };
2151
2152 // If both arguments of the binary operation are shuffles that use the same
2153 // mask and shuffle within a single vector, move the shuffle after the binop.
2154 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2155 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2156 V1->getType() == V2->getType() &&
2157 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2158 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2159 return createBinOpShuffle(V1, V2, Mask);
2160 }
2161
2162 // If both arguments of a commutative binop are select-shuffles that use the
2163 // same mask with commuted operands, the shuffles are unnecessary.
2164 if (Inst.isCommutative() &&
2165 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2166 match(RHS,
2167 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2168 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2169 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2170 // TODO: Allow shuffles that contain undefs in the mask?
2171 // That is legal, but it reduces undef knowledge.
2172 // TODO: Allow arbitrary shuffles by shuffling after binop?
2173 // That might be legal, but we have to deal with poison.
2174 if (LShuf->isSelect() &&
2175 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2176 RShuf->isSelect() &&
2177 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2178 // Example:
2179 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2180 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2181 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2182 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2183 NewBO->copyIRFlags(&Inst);
2184 return NewBO;
2185 }
2186 }
2187
2188 // If one argument is a shuffle within one vector and the other is a constant,
2189 // try moving the shuffle after the binary operation. This canonicalization
2190 // intends to move shuffles closer to other shuffles and binops closer to
2191 // other binops, so they can be folded. It may also enable demanded elements
2192 // transforms.
2193 Constant *C;
2194 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
2195 if (InstVTy &&
2197 m_Mask(Mask))),
2198 m_ImmConstant(C))) &&
2199 cast<FixedVectorType>(V1->getType())->getNumElements() <=
2200 InstVTy->getNumElements()) {
2201 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
2202 "Shuffle should not change scalar type");
2203
2204 // Find constant NewC that has property:
2205 // shuffle(NewC, ShMask) = C
2206 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>)
2207 // reorder is not possible. A 1-to-1 mapping is not required. Example:
2208 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
2209 bool ConstOp1 = isa<Constant>(RHS);
2210 ArrayRef<int> ShMask = Mask;
2211 unsigned SrcVecNumElts =
2212 cast<FixedVectorType>(V1->getType())->getNumElements();
2213 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2214 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
2215 bool MayChange = true;
2216 unsigned NumElts = InstVTy->getNumElements();
2217 for (unsigned I = 0; I < NumElts; ++I) {
2218 Constant *CElt = C->getAggregateElement(I);
2219 if (ShMask[I] >= 0) {
2220 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2221 Constant *NewCElt = NewVecC[ShMask[I]];
2222 // Bail out if:
2223 // 1. The constant vector contains a constant expression.
2224 // 2. The shuffle needs an element of the constant vector that can't
2225 // be mapped to a new constant vector.
2226 // 3. This is a widening shuffle that copies elements of V1 into the
2227 // extended elements (extending with poison is allowed).
2228 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2229 I >= SrcVecNumElts) {
2230 MayChange = false;
2231 break;
2232 }
2233 NewVecC[ShMask[I]] = CElt;
2234 }
2235 // If this is a widening shuffle, we must be able to extend with poison
2236 // elements. If the original binop does not produce a poison in the high
2237 // lanes, then this transform is not safe.
2238 // Similarly for poison lanes due to the shuffle mask, we can only
2239 // transform binops that preserve poison.
2240 // TODO: We could shuffle those non-poison constant values into the
2241 // result by using a constant vector (rather than a poison vector)
2242 // as operand 1 of the new binop, but that might be too aggressive
2243 // for target-independent shuffle creation.
2244 if (I >= SrcVecNumElts || ShMask[I] < 0) {
2245 Constant *MaybePoison =
2246 ConstOp1
2247 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
2248 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
2249 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2250 MayChange = false;
2251 break;
2252 }
2253 }
2254 }
2255 if (MayChange) {
2256 Constant *NewC = ConstantVector::get(NewVecC);
2257 // It may not be safe to execute a binop on a vector with poison elements
2258 // because the entire instruction can be folded to undef or create poison
2259 // that did not exist in the original code.
2260 // TODO: The shift case should not be necessary.
2261 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
2262 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2263
2264 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2265 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2266 Value *NewLHS = ConstOp1 ? V1 : NewC;
2267 Value *NewRHS = ConstOp1 ? NewC : V1;
2268 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2269 }
2270 }
2271
2272 // Try to reassociate to sink a splat shuffle after a binary operation.
2273 if (Inst.isAssociative() && Inst.isCommutative()) {
2274 // Canonicalize shuffle operand as LHS.
2275 if (isa<ShuffleVectorInst>(RHS))
2276 std::swap(LHS, RHS);
2277
2278 Value *X;
2279 ArrayRef<int> MaskC;
2280 int SplatIndex;
2281 Value *Y, *OtherOp;
2282 if (!match(LHS,
2283 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2284 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2285 X->getType() != Inst.getType() ||
2286 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2287 return nullptr;
2288
2289 // FIXME: This may not be safe if the analysis allows undef elements. By
2290 // moving 'Y' before the splat shuffle, we are implicitly assuming
2291 // that it is not undef/poison at the splat index.
2292 if (isSplatValue(OtherOp, SplatIndex)) {
2293 std::swap(Y, OtherOp);
2294 } else if (!isSplatValue(Y, SplatIndex)) {
2295 return nullptr;
2296 }
2297
2298 // X and Y are splatted values, so perform the binary operation on those
2299 // values followed by a splat followed by the 2nd binary operation:
2300 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2301 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2302 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2303 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2304 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2305
2306 // Intersect FMF on both new binops. Other (poison-generating) flags are
2307 // dropped to be safe.
2308 if (isa<FPMathOperator>(R)) {
2309 R->copyFastMathFlags(&Inst);
2310 R->andIRFlags(RHS);
2311 }
2312 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2313 NewInstBO->copyIRFlags(R);
2314 return R;
2315 }
2316
2317 return nullptr;
2318}
2319
2320/// Try to narrow the width of a binop if at least 1 operand is an extend of
2321 /// a value. This requires a potentially expensive known bits check to make
2322/// sure the narrow op does not overflow.
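/// A sketch of the narrowing, assuming known bits prove the narrow add cannot
/// overflow (types are illustrative):
///   %xw = zext i8 %x to i32
///   %yw = zext i8 %y to i32
///   %r  = add i32 %xw, %yw
/// ==>
///   %n = add nuw i8 %x, %y
///   %r = zext i8 %n to i32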
2323Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2324 // We need at least one extended operand.
2325 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2326
2327 // If this is a sub, we swap the operands since we always want an extension
2328 // on the RHS. The LHS can be an extension or a constant.
2329 if (BO.getOpcode() == Instruction::Sub)
2330 std::swap(Op0, Op1);
2331
2332 Value *X;
2333 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2334 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2335 return nullptr;
2336
2337 // If both operands are the same extension from the same source type and we
2338 // can eliminate at least one (hasOneUse), this might work.
2339 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2340 Value *Y;
2341 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2342 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2343 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2344 // If that did not match, see if we have a suitable constant operand.
2345 // Truncating and extending must produce the same constant.
2346 Constant *WideC;
2347 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2348 return nullptr;
2349 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2350 if (!NarrowC)
2351 return nullptr;
2352 Y = NarrowC;
2353 }
2354
2355 // Swap back now that we found our operands.
2356 if (BO.getOpcode() == Instruction::Sub)
2357 std::swap(X, Y);
2358
2359 // Both operands have narrow versions. Last step: the math must not overflow
2360 // in the narrow width.
2361 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2362 return nullptr;
2363
2364 // bo (ext X), (ext Y) --> ext (bo X, Y)
2365 // bo (ext X), C --> ext (bo X, C')
2366 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2367 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2368 if (IsSext)
2369 NewBinOp->setHasNoSignedWrap();
2370 else
2371 NewBinOp->setHasNoUnsignedWrap();
2372 }
2373 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2374}
2375
2376/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2377/// transform.
2379 GEPOperator &GEP2) {
2381}
2382
2383/// Thread a GEP operation with constant indices through the constant true/false
2384/// arms of a select.
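/// For example (with constant select arms @A and @B, names illustrative):
///   %p = select i1 %c, ptr @A, ptr @B
///   %g = getelementptr inbounds i8, ptr %p, i64 4
/// ==>
///   %g = select i1 %c, ptr getelementptr inbounds (i8, ptr @A, i64 4),
///                      ptr getelementptr inbounds (i8, ptr @B, i64 4)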
2385 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2386 InstCombiner::BuilderTy &Builder) {
2387 if (!GEP.hasAllConstantIndices())
2388 return nullptr;
2389
2390 Instruction *Sel;
2391 Value *Cond;
2392 Constant *TrueC, *FalseC;
2393 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2394 !match(Sel,
2395 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2396 return nullptr;
2397
2398 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2399 // Propagate 'inbounds' and metadata from existing instructions.
2400 // Note: using IRBuilder to create the constants for efficiency.
2401 SmallVector<Value *, 4> IndexC(GEP.indices());
2402 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2403 Type *Ty = GEP.getSourceElementType();
2404 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2405 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2406 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2407}
2408
2409// Canonicalization:
2410// gep T, (gep i8, base, C1), (Index + C2) into
2411// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
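// A hypothetical instance with T = i32 (so sizeof(T) == 4):
//   %p = getelementptr i8, ptr %base, i64 8
//   %i = add i64 %index, 3
//   %g = getelementptr i32, ptr %p, i64 %i
// ==>
//   %q = getelementptr i8, ptr %base, i64 20      ; 8 + 3 * 4
//   %g = getelementptr i32, ptr %q, i64 %index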
2412 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2413 GEPOperator *Src,
2414 InstCombinerImpl &IC) {
2415 if (GEP.getNumIndices() != 1)
2416 return nullptr;
2417 auto &DL = IC.getDataLayout();
2418 Value *Base;
2419 const APInt *C1;
2420 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2421 return nullptr;
2422 Value *VarIndex;
2423 const APInt *C2;
2424 Type *PtrTy = Src->getType()->getScalarType();
2425 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2426 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2427 return nullptr;
2428 if (C1->getBitWidth() != IndexSizeInBits ||
2429 C2->getBitWidth() != IndexSizeInBits)
2430 return nullptr;
2431 Type *BaseType = GEP.getSourceElementType();
2432 if (isa<ScalableVectorType>(BaseType))
2433 return nullptr;
2434 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2435 APInt NewOffset = TypeSize * *C2 + *C1;
2436 if (NewOffset.isZero() ||
2437 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2438 Value *GEPConst =
2439 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
2440 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
2441 }
2442
2443 return nullptr;
2444}
2445
2446 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2447 GEPOperator *Src) {
2448 // Combine Indices - If the source pointer to this getelementptr instruction
2449 // is a getelementptr instruction with matching element type, combine the
2450 // indices of the two getelementptr instructions into a single instruction.
2451 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2452 return nullptr;
2453
2454 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2455 return I;
2456
2457 // For constant GEPs, use a more general offset-based folding approach.
2458 Type *PtrTy = Src->getType()->getScalarType();
2459 if (GEP.hasAllConstantIndices() &&
2460 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2461 // Split Src into a variable part and a constant suffix.
2462 gep_type_iterator GTI = gep_type_begin(*Src);
2463 Type *BaseType = GTI.getIndexedType();
2464 bool IsFirstType = true;
2465 unsigned NumVarIndices = 0;
2466 for (auto Pair : enumerate(Src->indices())) {
2467 if (!isa<ConstantInt>(Pair.value())) {
2468 BaseType = GTI.getIndexedType();
2469 IsFirstType = false;
2470 NumVarIndices = Pair.index() + 1;
2471 }
2472 ++GTI;
2473 }
2474
2475 // Determine the offset for the constant suffix of Src.
2477 if (NumVarIndices != Src->getNumIndices()) {
2478 // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2479 if (BaseType->isScalableTy())
2480 return nullptr;
2481
2482 SmallVector<Value *> ConstantIndices;
2483 if (!IsFirstType)
2484 ConstantIndices.push_back(
2486 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2487 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2488 }
2489
2490 // Add the offset for GEP (which is fully constant).
2491 if (!GEP.accumulateConstantOffset(DL, Offset))
2492 return nullptr;
2493
2494 // Convert the total offset back into indices.
2495 SmallVector<APInt> ConstIndices =
2496 DL.getGEPIndicesForOffset(BaseType, Offset);
2497 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2498 return nullptr;
2499
2500 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2501 SmallVector<Value *> Indices;
2502 append_range(Indices, drop_end(Src->indices(),
2503 Src->getNumIndices() - NumVarIndices));
2504 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2505 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2506 // Even if the total offset is inbounds, we may end up representing it
2507 // by first performing a larger negative offset, and then a smaller
2508 // positive one. The large negative offset might go out of bounds. Only
2509 // preserve inbounds if all signs are the same.
2510 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2512 if (!Idx.isNonNegative())
2513 NW = NW.withoutNoUnsignedWrap();
2514 }
2515
2516 return replaceInstUsesWith(
2517 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2518 Indices, "", NW));
2519 }
2520
2521 if (Src->getResultElementType() != GEP.getSourceElementType())
2522 return nullptr;
2523
2524 SmallVector<Value*, 8> Indices;
2525
2526 // Find out whether the last index in the source GEP is a sequential idx.
2527 bool EndsWithSequential = false;
2528 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2529 I != E; ++I)
2530 EndsWithSequential = I.isSequential();
2531
2532 // Can we combine the two pointer arithmetics offsets?
2533 if (EndsWithSequential) {
2534 // Replace: gep (gep %P, long B), long A, ...
2535 // With: T = long A+B; gep %P, T, ...
2536 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2537 Value *GO1 = GEP.getOperand(1);
2538
2539 // If they aren't the same type, then the input hasn't been processed
2540 // by the loop above yet (which canonicalizes sequential index types to
2541 // intptr_t). Just avoid transforming this until the input has been
2542 // normalized.
2543 if (SO1->getType() != GO1->getType())
2544 return nullptr;
2545
2546 Value *Sum =
2547 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2548 // Only do the combine when we are sure the cost after the
2549 // merge is never more than that before the merge.
2550 if (Sum == nullptr)
2551 return nullptr;
2552
2553 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2554 Indices.push_back(Sum);
2555 Indices.append(GEP.op_begin()+2, GEP.op_end());
2556 } else if (isa<Constant>(*GEP.idx_begin()) &&
2557 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2558 Src->getNumOperands() != 1) {
2559 // Otherwise we can do the fold if the first index of the GEP is a zero
2560 Indices.append(Src->op_begin()+1, Src->op_end());
2561 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2562 }
2563
2564 if (!Indices.empty())
2565 return replaceInstUsesWith(
2566 GEP, Builder.CreateGEP(
2567 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2568 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2569
2570 return nullptr;
2571}
2572
2573 Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2574 BuilderTy *Builder,
2575 bool &DoesConsume, unsigned Depth) {
2576 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2577 // ~(~(X)) -> X.
2578 Value *A, *B;
2579 if (match(V, m_Not(m_Value(A)))) {
2580 DoesConsume = true;
2581 return A;
2582 }
2583
2584 Constant *C;
2585 // Constants can be considered to be not'ed values.
2586 if (match(V, m_ImmConstant(C)))
2587 return ConstantExpr::getNot(C);
2588
2590 return nullptr;
2591
2592 // The rest of the cases require that we invert all uses so don't bother
2593 // doing the analysis if we know we can't use the result.
2594 if (!WillInvertAllUses)
2595 return nullptr;
2596
2597 // Compares can be inverted if all of their uses are being modified to use
2598 // the ~V.
2599 if (auto *I = dyn_cast<CmpInst>(V)) {
2600 if (Builder != nullptr)
2601 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2602 I->getOperand(1));
2603 return NonNull;
2604 }
2605
2606 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2607 // `(-1 - B) - A` if we are willing to invert all of the uses.
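// (Derivation: ~V == -1 - V, so ~(A + B) == -1 - A - B == (-1 - B) - A.)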
2608 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2609 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2610 DoesConsume, Depth))
2611 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2612 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2613 DoesConsume, Depth))
2614 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2615 return nullptr;
2616 }
2617
2618 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2619 // into `A ^ B` if we are willing to invert all of the uses.
2620 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2621 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2622 DoesConsume, Depth))
2623 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2624 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2625 DoesConsume, Depth))
2626 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2627 return nullptr;
2628 }
2629
2630 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2631 // `A + (-1 - B)` if we are willing to invert all of the uses.
2632 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2633 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2634 DoesConsume, Depth))
2635 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2636 return nullptr;
2637 }
2638
2639 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2640 // into `A s>> B` if we are willing to invert all of the uses.
2641 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2642 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2643 DoesConsume, Depth))
2644 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2645 return nullptr;
2646 }
2647
2648 Value *Cond;
2649 // LogicOps are special in that we canonicalize them at the cost of an
2650 // instruction.
2651 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2652 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2653 // Selects/min/max with invertible operands are freely invertible
2654 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2655 bool LocalDoesConsume = DoesConsume;
2656 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2657 LocalDoesConsume, Depth))
2658 return nullptr;
2659 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2660 LocalDoesConsume, Depth)) {
2661 DoesConsume = LocalDoesConsume;
2662 if (Builder != nullptr) {
2663 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2664 DoesConsume, Depth);
2665 assert(NotB != nullptr &&
2666 "Unable to build inverted value for known freely invertable op");
2667 if (auto *II = dyn_cast<IntrinsicInst>(V))
2669 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2670 return Builder->CreateSelect(Cond, NotA, NotB);
2671 }
2672 return NonNull;
2673 }
2674 }
2675
2676 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2677 bool LocalDoesConsume = DoesConsume;
2679 for (Use &U : PN->operands()) {
2680 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2681 Value *NewIncomingVal = getFreelyInvertedImpl(
2682 U.get(), /*WillInvertAllUses=*/false,
2683 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2684 if (NewIncomingVal == nullptr)
2685 return nullptr;
2686 // Make sure that we can safely erase the original PHI node.
2687 if (NewIncomingVal == V)
2688 return nullptr;
2689 if (Builder != nullptr)
2690 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2691 }
2692
2693 DoesConsume = LocalDoesConsume;
2694 if (Builder != nullptr) {
2697 PHINode *NewPN =
2698 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2699 for (auto [Val, Pred] : IncomingValues)
2700 NewPN->addIncoming(Val, Pred);
2701 return NewPN;
2702 }
2703 return NonNull;
2704 }
2705
2706 if (match(V, m_SExtLike(m_Value(A)))) {
2707 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2708 DoesConsume, Depth))
2709 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2710 return nullptr;
2711 }
2712
2713 if (match(V, m_Trunc(m_Value(A)))) {
2714 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2715 DoesConsume, Depth))
2716 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2717 return nullptr;
2718 }
2719
2720 // De Morgan's Laws:
2721 // (~(A | B)) -> (~A & ~B)
2722 // (~(A & B)) -> (~A | ~B)
2723 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2724 bool IsLogical, Value *A,
2725 Value *B) -> Value * {
2726 bool LocalDoesConsume = DoesConsume;
2727 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2728 LocalDoesConsume, Depth))
2729 return nullptr;
2730 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2731 LocalDoesConsume, Depth)) {
2732 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2733 LocalDoesConsume, Depth);
2734 DoesConsume = LocalDoesConsume;
2735 if (IsLogical)
2736 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2737 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2738 }
2739
2740 return nullptr;
2741 };
2742
2743 if (match(V, m_Or(m_Value(A), m_Value(B))))
2744 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2745 B);
2746
2747 if (match(V, m_And(m_Value(A), m_Value(B))))
2748 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2749 B);
2750
2751 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2752 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2753 B);
2754
2755 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2756 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2757 B);
2758
2759 return nullptr;
2760}
2761
2762/// Return true if we should canonicalize the gep to an i8 ptradd.
2764 Value *PtrOp = GEP.getOperand(0);
2765 Type *GEPEltType = GEP.getSourceElementType();
2766 if (GEPEltType->isIntegerTy(8))
2767 return false;
2768
2769 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
2770 // intrinsic. This has better support in BasicAA.
2771 if (GEPEltType->isScalableTy())
2772 return true;
2773
2774 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
2775 // together.
2776 if (GEP.getNumIndices() == 1 &&
2777 match(GEP.getOperand(1),
2779 m_Shl(m_Value(), m_ConstantInt())))))
2780 return true;
2781
2782 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
2783 // possibly be merged together.
2784 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
2785 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
2786 any_of(GEP.indices(), [](Value *V) {
2787 const APInt *C;
2788 return match(V, m_APInt(C)) && !C->isZero();
2789 });
2790}
2791
2793 IRBuilderBase &Builder) {
2794 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2795 if (!Op1)
2796 return nullptr;
2797
2798 // Don't fold a GEP into itself through a PHI node. This can only happen
2799 // through the back-edge of a loop. Folding a GEP into itself means that
2800 // the value of the previous iteration needs to be stored in the meantime,
2801 // thus requiring an additional register variable to be live, but not
2802 // actually achieving anything (the GEP still needs to be executed once per
2803 // loop iteration).
2804 if (Op1 == &GEP)
2805 return nullptr;
2806 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
2807
2808 int DI = -1;
2809
2810 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2811 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2812 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2813 Op1->getSourceElementType() != Op2->getSourceElementType())
2814 return nullptr;
2815
2816 // As for Op1 above, don't try to fold a GEP into itself.
2817 if (Op2 == &GEP)
2818 return nullptr;
2819
2820 // Keep track of the type as we walk the GEP.
2821 Type *CurTy = nullptr;
2822
2823 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2824 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2825 return nullptr;
2826
2827 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2828 if (DI == -1) {
2829 // We have not seen any differences yet in the GEPs feeding the
2830 // PHI yet, so we record this one if it is allowed to be a
2831 // variable.
2832
2833 // The first two arguments can vary for any GEP, the rest have to be
2834 // static for struct slots
2835 if (J > 1) {
2836 assert(CurTy && "No current type?");
2837 if (CurTy->isStructTy())
2838 return nullptr;
2839 }
2840
2841 DI = J;
2842 } else {
2843 // The GEP is different by more than one input. While this could be
2844 // extended to support GEPs that vary by more than one variable it
2845 // doesn't make sense since it greatly increases the complexity and
2846 // would result in an R+R+R addressing mode which no backend
2847 // directly supports and would need to be broken into several
2848 // simpler instructions anyway.
2849 return nullptr;
2850 }
2851 }
2852
2853 // Sink down a layer of the type for the next iteration.
2854 if (J > 0) {
2855 if (J == 1) {
2856 CurTy = Op1->getSourceElementType();
2857 } else {
2858 CurTy =
2859 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2860 }
2861 }
2862 }
2863
2864 NW &= Op2->getNoWrapFlags();
2865 }
2866
2867 // If not all GEPs are identical we'll have to create a new PHI node.
2868 // Check that the old PHI node has only one use so that it will get
2869 // removed.
2870 if (DI != -1 && !PN->hasOneUse())
2871 return nullptr;
2872
2873 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2874 NewGEP->setNoWrapFlags(NW);
2875
2876 if (DI == -1) {
2877 // All the GEPs feeding the PHI are identical. Clone one down into our
2878 // BB so that it can be merged with the current GEP.
2879 } else {
2880 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2881 // into the current block so it can be merged, and create a new PHI to
2882 // set that index.
2883 PHINode *NewPN;
2884 {
2885 IRBuilderBase::InsertPointGuard Guard(Builder);
2886 Builder.SetInsertPoint(PN);
2887 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2888 PN->getNumOperands());
2889 }
2890
2891 for (auto &I : PN->operands())
2892 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2893 PN->getIncomingBlock(I));
2894
2895 NewGEP->setOperand(DI, NewPN);
2896 }
2897
2898 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
2899 return NewGEP;
2900}
2901
2902 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2903 Value *PtrOp = GEP.getOperand(0);
2904 SmallVector<Value *, 8> Indices(GEP.indices());
2905 Type *GEPType = GEP.getType();
2906 Type *GEPEltType = GEP.getSourceElementType();
2907 if (Value *V =
2908 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
2910 return replaceInstUsesWith(GEP, V);
2911
2912 // For vector geps, use the generic demanded vector support.
2913 // Skip if GEP return type is scalable. The number of elements is unknown at
2914 // compile-time.
2915 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2916 auto VWidth = GEPFVTy->getNumElements();
2917 APInt PoisonElts(VWidth, 0);
2918 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2919 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2920 PoisonElts)) {
2921 if (V != &GEP)
2922 return replaceInstUsesWith(GEP, V);
2923 return &GEP;
2924 }
2925
2926 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2927 // possible (decide on canonical form for pointer broadcast), 3) exploit
2928 // undef elements to decrease demanded bits
2929 }
2930
2931 // Eliminate unneeded casts for indices, and replace indices which displace
2932 // by multiples of a zero size type with zero.
2933 bool MadeChange = false;
2934
2935 // Index width may not be the same width as pointer width.
2936 // Data layout chooses the right type based on supported integer types.
2937 Type *NewScalarIndexTy =
2938 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2939
2940 gep_type_iterator GTI = gep_type_begin(GEP);
2941 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2942 ++I, ++GTI) {
2943 // Skip indices into struct types.
2944 if (GTI.isStruct())
2945 continue;
2946
2947 Type *IndexTy = (*I)->getType();
2948 Type *NewIndexType =
2949 IndexTy->isVectorTy()
2950 ? VectorType::get(NewScalarIndexTy,
2951 cast<VectorType>(IndexTy)->getElementCount())
2952 : NewScalarIndexTy;
2953
2954 // If the element type has zero size then any index over it is equivalent
2955 // to an index of zero, so replace it with zero if it is not zero already.
2956 Type *EltTy = GTI.getIndexedType();
2957 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2958 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2959 *I = Constant::getNullValue(NewIndexType);
2960 MadeChange = true;
2961 }
2962
2963 if (IndexTy != NewIndexType) {
2964 // If we are using a wider index than needed for this platform, shrink
2965 // it to what we need. If narrower, sign-extend it to what we need.
2966 // This explicit cast can make subsequent optimizations more obvious.
2967 *I = Builder.CreateIntCast(*I, NewIndexType, true);
2968 MadeChange = true;
2969 }
2970 }
2971 if (MadeChange)
2972 return &GEP;
2973
2974 // Canonicalize constant GEPs to i8 type.
2975 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
2977 if (GEP.accumulateConstantOffset(DL, Offset))
2978 return replaceInstUsesWith(
2980 GEP.getNoWrapFlags()));
2981 }
2982
2983 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
2984 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
2985 Value *NewGEP =
2986 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
2987 return replaceInstUsesWith(GEP, NewGEP);
2988 }
2989
2990 // Check to see if the inputs to the PHI node are getelementptr instructions.
2991 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2992 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
2993 return replaceOperand(GEP, 0, NewPtrOp);
2994 }
2995
2996 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2997 if (Instruction *I = visitGEPOfGEP(GEP, Src))
2998 return I;
2999
3000 if (GEP.getNumIndices() == 1) {
3001 unsigned AS = GEP.getPointerAddressSpace();
3002 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3003 DL.getIndexSizeInBits(AS)) {
3004 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3005
3006 if (TyAllocSize == 1) {
3007 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3008 // but only if the result pointer is only used as if it were an integer,
3009 // or both point to the same underlying object (otherwise provenance is
3010 // not necessarily retained).
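// A sketch of the matched pattern (%x and %y are illustrative pointers):
//   %xi = ptrtoint ptr %x to i64
//   %yi = ptrtoint ptr %y to i64
//   %d  = sub i64 %yi, %xi
//   %g  = getelementptr i8, ptr %x, i64 %d   ; may be replaced by %y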
3011 Value *X = GEP.getPointerOperand();
3012 Value *Y;
3013 if (match(GEP.getOperand(1),
3015 GEPType == Y->getType()) {
3016 bool HasSameUnderlyingObject =
3018 bool Changed = false;
3019 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3020 bool ShouldReplace = HasSameUnderlyingObject ||
3021 isa<ICmpInst>(U.getUser()) ||
3022 isa<PtrToIntInst>(U.getUser());
3023 Changed |= ShouldReplace;
3024 return ShouldReplace;
3025 });
3026 return Changed ? &GEP : nullptr;
3027 }
3028 } else if (auto *ExactIns =
3029 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3030 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
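// For instance, with a 4-byte element type and an exact division:
//   %i = sdiv exact i64 %v, 4
//   %g = getelementptr i32, ptr %x, i64 %i
// ==>
//   %g = getelementptr i8, ptr %x, i64 %v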
3031 Value *V;
3032 if (ExactIns->isExact()) {
3033 if ((has_single_bit(TyAllocSize) &&
3034 match(GEP.getOperand(1),
3035 m_Shr(m_Value(V),
3036 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3037 match(GEP.getOperand(1),
3038 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3040 GEP.getPointerOperand(), V,
3041 GEP.getNoWrapFlags());
3042 }
3043 }
3044 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3045 // Try to canonicalize a non-i8 element type to i8 if the index is an
3046 // exact instruction (div/shr) with a constant RHS; we can fold the
3047 // non-i8 element scale into the div/shr (similar to the mul case,
3048 // just inverted).
3049 const APInt *C;
3050 std::optional<APInt> NewC;
3051 if (has_single_bit(TyAllocSize) &&
3052 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3053 C->uge(countr_zero(TyAllocSize)))
3054 NewC = *C - countr_zero(TyAllocSize);
3055 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3056 APInt Quot;
3057 uint64_t Rem;
3058 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3059 if (Rem == 0)
3060 NewC = Quot;
3061 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3062 APInt Quot;
3063 int64_t Rem;
3064 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3065 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3066 if (!Quot.isAllOnes() && Rem == 0)
3067 NewC = Quot;
3068 }
3069
3070 if (NewC.has_value()) {
3071 Value *NewOp = Builder.CreateBinOp(
3072 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3073 ConstantInt::get(V->getType(), *NewC));
3074 cast<BinaryOperator>(NewOp)->setIsExact();
3076 GEP.getPointerOperand(), NewOp,
3077 GEP.getNoWrapFlags());
3078 }
3079 }
3080 }
3081 }
3082 }
3083 // We do not handle pointer-vector geps here.
3084 if (GEPType->isVectorTy())
3085 return nullptr;
3086
3087 if (GEP.getNumIndices() == 1) {
3088 // We can only preserve inbounds if the original gep is inbounds, the add
3089 // is nsw, and the add operands are non-negative.
3090 auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {
3092 return GEP.isInBounds() && AddIsNSW && isKnownNonNegative(Idx1, Q) &&
3093 isKnownNonNegative(Idx2, Q);
3094 };
3095
3096 // Try to replace ADD + GEP with GEP + GEP.
3097 Value *Idx1, *Idx2;
3098 if (match(GEP.getOperand(1),
3099 m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
3100 // %idx = add i64 %idx1, %idx2
3101 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3102 // as:
3103 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3104 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3105 bool IsInBounds = CanPreserveInBounds(
3106 cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),
3107 Idx1, Idx2);
3108 auto *NewPtr =
3109 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3110 Idx1, "", IsInBounds);
3111 return replaceInstUsesWith(
3112 GEP, Builder.CreateGEP(GEP.getSourceElementType(), NewPtr, Idx2, "",
3113 IsInBounds));
3114 }
3115 ConstantInt *C;
3116 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
3117 m_Value(Idx1), m_ConstantInt(C))))))) {
3118 // %add = add nsw i32 %idx1, idx2
3119 // %sidx = sext i32 %add to i64
3120 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3121 // as:
3122 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3123 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3124 bool IsInBounds = CanPreserveInBounds(
3125 /*IsNSW=*/true, Idx1, C);
3126 auto *NewPtr = Builder.CreateGEP(
3127 GEP.getSourceElementType(), GEP.getPointerOperand(),
3128 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "",
3129 IsInBounds);
3130 return replaceInstUsesWith(
3131 GEP,
3132 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3133 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3134 "", IsInBounds));
3135 }
3136 }
3137
3138 if (!GEP.isInBounds()) {
3139 unsigned IdxWidth =
3141 APInt BasePtrOffset(IdxWidth, 0);
3142 Value *UnderlyingPtrOp =
3144 BasePtrOffset);
3145 bool CanBeNull, CanBeFreed;
3146 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3147 DL, CanBeNull, CanBeFreed);
3148 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3149 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3150 BasePtrOffset.isNonNegative()) {
3151 APInt AllocSize(IdxWidth, DerefBytes);
3152 if (BasePtrOffset.ule(AllocSize)) {
3153 return GetElementPtrInst::CreateInBounds(
3154 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3155 }
3156 }
3157 }
3158 }
3159
3160 // nusw + nneg -> nuw
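// (Rationale: with "nusw" already present, indices that are all known
//  non-negative cannot make the unsigned offset computation wrap either,
//  so "nuw" can be added as well.)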
3161 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3162 all_of(GEP.indices(), [&](Value *Idx) {
3163 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3164 })) {
3165 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3166 return &GEP;
3167 }
3168
3169 if (Instruction *R = foldSelectGEP(GEP, Builder))
3170 return R;
3171
3172 return nullptr;
3173}
3174
3176 Instruction *AI) {
3177 if (isa<ConstantPointerNull>(V))
3178 return true;
3179 if (auto *LI = dyn_cast<LoadInst>(V))
3180 return isa<GlobalVariable>(LI->getPointerOperand());
3181 // Two distinct allocations will never be equal.
3182 return isAllocLikeFn(V, &TLI) && V != AI;
3183}
3184
3185/// Given a call CB which uses an address UsedV, return true if we can prove the
3186 /// call's only possible effect is storing to UsedV.
3187static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3188 const TargetLibraryInfo &TLI) {
3189 if (!CB.use_empty())
3190 // TODO: add recursion if returned attribute is present
3191 return false;
3192
3193 if (CB.isTerminator())
3194 // TODO: remove implementation restriction
3195 return false;
3196
3197 if (!CB.willReturn() || !CB.doesNotThrow())
3198 return false;
3199
3200 // If the only possible side effect of the call is writing to the alloca,
3201 // and the result isn't used, we can safely remove any reads implied by the
3202 // call including those which might read the alloca itself.
3203 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3204 return Dest && Dest->Ptr == UsedV;
3205}
3206
3209 const TargetLibraryInfo &TLI) {
3211 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3212 Worklist.push_back(AI);
3213
3214 do {
3215 Instruction *PI = Worklist.pop_back_val();
3216 for (User *U : PI->users()) {
3217 Instruction *I = cast<Instruction>(U);
3218 switch (I->getOpcode()) {
3219 default:
3220 // Give up the moment we see something we can't handle.
3221 return false;
3222
3223 case Instruction::AddrSpaceCast:
3224 case Instruction::BitCast:
3225 case Instruction::GetElementPtr:
3226 Users.emplace_back(I);
3227 Worklist.push_back(I);
3228 continue;
3229
3230 case Instruction::ICmp: {
3231 ICmpInst *ICI = cast<ICmpInst>(I);
3232 // We can fold eq/ne comparisons with null to false/true, respectively.
3233 // We also fold comparisons in some conditions provided the alloc has
3234 // not escaped (see isNeverEqualToUnescapedAlloc).
3235 if (!ICI->isEquality())
3236 return false;
3237 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3238 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3239 return false;
3240
3241 // Do not fold compares to aligned_alloc calls, as they may have to
3242 // return null in case the required alignment cannot be satisfied,
3243 // unless we can prove that both alignment and size are valid.
3244 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3245 // Check if the alignment and size of a call to aligned_alloc are valid,
3246 // that is, the alignment is a power-of-2 and the size is a multiple of the
3247 // alignment.
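// Illustrative example (not from the original source): a compare against the
// result of aligned_alloc(16, 64) may still be folded because 16 is a power
// of two and 64 is a multiple of 16, whereas aligned_alloc(%a, %n) with
// unknown arguments may have to return null and is left alone.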
3248 const APInt *Alignment;
3249 const APInt *Size;
3250 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3251 match(CB->getArgOperand(1), m_APInt(Size)) &&
3252 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3253 };
3254 auto *CB = dyn_cast<CallBase>(AI);
3255 LibFunc TheLibFunc;
3256 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3257 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3258 !AlignmentAndSizeKnownValid(CB))
3259 return false;
3260 Users.emplace_back(I);
3261 continue;
3262 }
3263
3264 case Instruction::Call:
3265 // Ignore no-op and store intrinsics.
3266 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3267 switch (II->getIntrinsicID()) {
3268 default:
3269 return false;
3270
3271 case Intrinsic::memmove:
3272 case Intrinsic::memcpy:
3273 case Intrinsic::memset: {
3274 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3275 if (MI->isVolatile() || MI->getRawDest() != PI)
3276 return false;
3277 [[fallthrough]];
3278 }
3279 case Intrinsic::assume:
3280 case Intrinsic::invariant_start:
3281 case Intrinsic::invariant_end:
3282 case Intrinsic::lifetime_start:
3283 case Intrinsic::lifetime_end:
3284 case Intrinsic::objectsize:
3285 Users.emplace_back(I);
3286 continue;
3287 case Intrinsic::launder_invariant_group:
3288 case Intrinsic::strip_invariant_group:
3289 Users.emplace_back(I);
3290 Worklist.push_back(I);
3291 continue;
3292 }
3293 }
3294
3295 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3296 Users.emplace_back(I);
3297 continue;
3298 }
3299
3300 if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3301 getAllocationFamily(I, &TLI) == Family) {
3302 assert(Family);
3303 Users.emplace_back(I);
3304 continue;
3305 }
3306
3307 if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
3308 getAllocationFamily(I, &TLI) == Family) {
3309 assert(Family);
3310 Users.emplace_back(I);
3311 Worklist.push_back(I);
3312 continue;
3313 }
3314
3315 return false;
3316
3317 case Instruction::Store: {
3318 StoreInst *SI = cast<StoreInst>(I);
3319 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3320 return false;
3321 Users.emplace_back(I);
3322 continue;
3323 }
3324 }
3325 llvm_unreachable("missing a return?");
3326 }
3327 } while (!Worklist.empty());
3328 return true;
3329}
3330
3332 assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
3333
3334 // If we have a malloc call which is only used in any amount of comparisons to
3335 // null and free calls, delete the calls and replace the comparisons with true
3336 // or false as appropriate.
3337
3338 // This is based on the principle that we can substitute our own allocation
3339 // function (which will never return null) rather than knowledge of the
3340 // specific function being called. In some sense this can change the permitted
3341 // outputs of a program (when we convert a malloc to an alloca, the fact that
3342 // the allocation is now on the stack is potentially visible, for example),
3343 // but we believe it does so in a permissible manner.
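// Illustrative example (not from the original source):
//   %p = call ptr @malloc(i64 4)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// Here the malloc/free pair can be deleted and %c replaced with 'false'.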
3345
3346 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3347 // before each store.
3350 std::unique_ptr<DIBuilder> DIB;
3351 if (isa<AllocaInst>(MI)) {
3352 findDbgUsers(DVIs, &MI, &DVRs);
3353 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3354 }
3355
3356 if (isAllocSiteRemovable(&MI, Users, TLI)) {
3357 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3358 // Lower all @llvm.objectsize calls first because they may
3359 // use a bitcast/GEP of the alloca we are removing.
3360 if (!Users[i])
3361 continue;
3362
3363 Instruction *I = cast<Instruction>(&*Users[i]);
3364
3365 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3366 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3367 SmallVector<Instruction *> InsertedInstructions;
3368 Value *Result = lowerObjectSizeCall(
3369 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3370 for (Instruction *Inserted : InsertedInstructions)
3371 Worklist.add(Inserted);
3372 replaceInstUsesWith(*I, Result);
3374 Users[i] = nullptr; // Skip examining in the next loop.
3375 }
3376 }
3377 }
3378 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3379 if (!Users[i])
3380 continue;
3381
3382 Instruction *I = cast<Instruction>(&*Users[i]);
3383
3384 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3386 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3387 C->isFalseWhenEqual()));
3388 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3389 for (auto *DVI : DVIs)
3390 if (DVI->isAddressOfVariable())
3391 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
3392 for (auto *DVR : DVRs)
3393 if (DVR->isAddressOfVariable())
3394 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3395 } else {
3396 // Casts, GEP, or anything else: we're about to delete this instruction,
3397 // so it cannot have any valid uses.
3398 replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
3399 }
3401 }
3402
3403 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3404 // Replace invoke with a NOP intrinsic to maintain the original CFG
3405 Module *M = II->getModule();
3406 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3407 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "",
3408 II->getParent());
3409 }
3410
3411 // Remove debug intrinsics which describe the value contained within the
3412 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3413 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3414 //
3415 // ```
3416 // define void @foo(i32 %0) {
3417 // %a = alloca i32 ; Deleted.
3418 // store i32 %0, i32* %a
3419 // dbg.value(i32 %0, "arg0") ; Not deleted.
3420 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3421 // call void @trivially_inlinable_no_op(i32* %a)
3422 // ret void
3423 // }
3424 // ```
3425 //
3426 // This may not be required if we stop describing the contents of allocas
3427 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3428 // the LowerDbgDeclare utility.
3429 //
3430 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3431 // "arg0" dbg.value may be stale after the call. However, failing to remove
3432 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3433 //
3434 // FIXME: the Assignment Tracking project has now likely made this
3435 // redundant (and it's sometimes harmful).
3436 for (auto *DVI : DVIs)
3437 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3438 DVI->eraseFromParent();
3439 for (auto *DVR : DVRs)
3440 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3441 DVR->eraseFromParent();
3442
3443 return eraseInstFromFunction(MI);
3444 }
3445 return nullptr;
3446}
3447
3448/// Move the call to free before a NULL test.
3449///
3450 /// Check if this free is accessed after its argument has been tested
3451/// against NULL (property 0).
3452/// If yes, it is legal to move this call in its predecessor block.
3453///
3454/// The move is performed only if the block containing the call to free
3455/// will be removed, i.e.:
3456/// 1. it has only one predecessor P, and P has two successors
3457/// 2. it contains the call, noops, and an unconditional branch
3458/// 3. its successor is the same as its predecessor's successor
3459///
3460 /// Profitability is not a concern here; this function should
3461/// be called only if the caller knows this transformation would be
3462/// profitable (e.g., for code size).
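/// An illustrative sketch (not from the original source):
///   entry:
///     %cmp = icmp eq ptr %p, null
///     br i1 %cmp, label %end, label %free.bb
///   free.bb:
///     call void @free(ptr %p)
///     br label %end
/// The call is hoisted into %entry before the null test; %free.bb then only
/// contains a branch and can be removed by SimplifyCFG (free(null) is a no-op).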
3464 const DataLayout &DL) {
3465 Value *Op = FI.getArgOperand(0);
3466 BasicBlock *FreeInstrBB = FI.getParent();
3467 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3468
3469 // Validate part of constraint #1: Only one predecessor
3470 // FIXME: We can extend the number of predecessors, but in that case, we
3471 // would duplicate the call to free in each predecessor and it may
3472 // not be profitable even for code size.
3473 if (!PredBB)
3474 return nullptr;
3475
3476 // Validate constraint #2: Does this block contain only the call to
3477 // free, noops, and an unconditional branch?
3478 BasicBlock *SuccBB;
3479 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3480 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3481 return nullptr;
3482
3483 // If there are only 2 instructions in the block, at this point,
3484 // these are the call to free and the unconditional branch.
3485 // If there are more than 2 instructions, check that they are noops
3486 // i.e., they won't hurt the performance of the generated code.
3487 if (FreeInstrBB->size() != 2) {
3488 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3489 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3490 continue;
3491 auto *Cast = dyn_cast<CastInst>(&Inst);
3492 if (!Cast || !Cast->isNoopCast(DL))
3493 return nullptr;
3494 }
3495 }
3496 // Validate the rest of constraint #1 by matching on the pred branch.
3497 Instruction *TI = PredBB->getTerminator();
3498 BasicBlock *TrueBB, *FalseBB;
3499 CmpPredicate Pred;
3500 if (!match(TI, m_Br(m_ICmp(Pred,
3502 m_Specific(Op->stripPointerCasts())),
3503 m_Zero()),
3504 TrueBB, FalseBB)))
3505 return nullptr;
3506 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3507 return nullptr;
3508
3509 // Validate constraint #3: Ensure the null case just falls through.
3510 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3511 return nullptr;
3512 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3513 "Broken CFG: missing edge from predecessor to successor");
3514
3515 // At this point, we know that everything in FreeInstrBB can be moved
3516 // before TI.
3517 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3518 if (&Instr == FreeInstrBBTerminator)
3519 break;
3520 Instr.moveBeforePreserving(TI);
3521 }
3522 assert(FreeInstrBB->size() == 1 &&
3523 "Only the branch instruction should remain");
3524
3525 // Now that we've moved the call to free before the NULL check, we have to
3526 // remove any attributes on its parameter that imply it's non-null, because
3527 // those attributes might have only been valid because of the NULL check, and
3528 // we can get miscompiles if we keep them. This is conservative if non-null is
3529 // also implied by something other than the NULL check, but it's guaranteed to
3530 // be correct, and the conservativeness won't matter in practice, since the
3531 // attributes are irrelevant for the call to free itself and the pointer
3532 // shouldn't be used after the call.
3533 AttributeList Attrs = FI.getAttributes();
3534 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3535 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3536 if (Dereferenceable.isValid()) {
3537 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3538 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3539 Attribute::Dereferenceable);
3540 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3541 }
3542 FI.setAttributes(Attrs);
3543
3544 return &FI;
3545}
3546
3548 // free undef -> unreachable.
3549 if (isa<UndefValue>(Op)) {
3550 // Leave a marker since we can't modify the CFG here.
3552 return eraseInstFromFunction(FI);
3553 }
3554
3555 // If we have 'free null' delete the instruction. This can happen in stl code
3556 // when lots of inlining happens.
3557 if (isa<ConstantPointerNull>(Op))
3558 return eraseInstFromFunction(FI);
3559
3560 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3561 // realloc() entirely.
3562 CallInst *CI = dyn_cast<CallInst>(Op);
3563 if (CI && CI->hasOneUse())
3564 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3565 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3566
3567 // If we optimize for code size, try to move the call to free before the null
3568 // test so that SimplifyCFG can remove the empty block and dead code
3569 // elimination can remove the branch. I.e., this helps to turn something like:
3570 // if (foo) free(foo);
3571 // into
3572 // free(foo);
3573 //
3574 // Note that we can only do this for 'free' and not for any flavor of
3575 // 'operator delete'; there is no 'operator delete' symbol for which we are
3576 // permitted to invent a call, even if we're passing in a null pointer.
3577 if (MinimizeSize) {
3578 LibFunc Func;
3579 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3581 return I;
3582 }
3583
3584 return nullptr;
3585}
3586
3588 Value *RetVal = RI.getReturnValue();
3589 if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
3590 return nullptr;
3591
3592 Function *F = RI.getFunction();
3593 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3594 if (ReturnClass == fcNone)
3595 return nullptr;
3596
3597 KnownFPClass KnownClass;
3598 Value *Simplified =
3599 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
3600 if (!Simplified)
3601 return nullptr;
3602
3603 return ReturnInst::Create(RI.getContext(), Simplified);
3604}
3605
3606// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3608 // Try to remove the previous instruction if it must lead to unreachable.
3609 // This includes instructions like stores and "llvm.assume" that may not get
3610 // removed by simple dead code elimination.
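// Illustrative example (not from the original source): in
//   store i32 1, ptr %p
//   call void @llvm.assume(i1 %c)
//   unreachable
// both instructions preceding 'unreachable' can be erased here.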
3611 bool Changed = false;
3612 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3613 // While we theoretically can erase EH, that would result in a block that
3614 // used to start with an EH no longer starting with EH, which is invalid.
3615 // To make it valid, we'd need to fixup predecessors to no longer refer to
3616 // this block, but that changes CFG, which is not allowed in InstCombine.
3617 if (Prev->isEHPad())
3618 break; // Can not drop any more instructions. We're done here.
3619
3621 break; // Can not drop any more instructions. We're done here.
3622 // Otherwise, this instruction can be freely erased,
3623 // even if it is not side-effect free.
3624
3625 // A value may still have uses before we process it here (for example, in
3626 // another unreachable block), so convert those to poison.
3627 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3628 eraseInstFromFunction(*Prev);
3629 Changed = true;
3630 }
3631 return Changed;
3632}
3633
3636 return nullptr;
3637}
3638
3640 assert(BI.isUnconditional() && "Only for unconditional branches.");
3641
3642 // If this store is the second-to-last instruction in the basic block
3643 // (excluding debug info and bitcasts of pointers) and if the block ends with
3644 // an unconditional branch, try to move the store to the successor block.
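// Illustrative sketch (not from the original source): a store that sits just
// before 'br label %succ' may be merged with a matching store in the other
// predecessor of %succ, leaving a single store of a phi in %succ.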
3645
3646 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3647 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3648 return BBI->isDebugOrPseudoInst() ||
3649 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3650 };
3651
3652 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3653 do {
3654 if (BBI != FirstInstr)
3655 --BBI;
3656 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3657
3658 return dyn_cast<StoreInst>(BBI);
3659 };
3660
3661 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3662 if (mergeStoreIntoSuccessor(*SI))
3663 return &BI;
3664
3665 return nullptr;
3666}
3667
3670 if (!DeadEdges.insert({From, To}).second)
3671 return;
3672
3673 // Replace phi node operands in successor with poison.
3674 for (PHINode &PN : To->phis())
3675 for (Use &U : PN.incoming_values())
3676 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3677 replaceUse(U, PoisonValue::get(PN.getType()));
3678 addToWorklist(&PN);
3679 MadeIRChange = true;
3680 }
3681
3682 Worklist.push_back(To);
3683}
3684
3685 // Under the assumption that I is unreachable, remove it and the following
3686// instructions. Changes are reported directly to MadeIRChange.
3689 BasicBlock *BB = I->getParent();
3690 for (Instruction &Inst : make_early_inc_range(
3691 make_range(std::next(BB->getTerminator()->getReverseIterator()),
3692 std::next(I->getReverseIterator())))) {
3693 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3694 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3695 MadeIRChange = true;
3696 }
3697 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3698 continue;
3699 // RemoveDIs: erase debug-info on this instruction manually.
3700 Inst.dropDbgRecords();
3702 MadeIRChange = true;
3703 }
3704
3705 SmallVector<Value *> Changed;
3706 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
3707 MadeIRChange = true;
3708 for (Value *V : Changed)
3709 addToWorklist(cast<Instruction>(V));
3710 }
3711
3712 // Handle potentially dead successors.
3713 for (BasicBlock *Succ : successors(BB))
3714 addDeadEdge(BB, Succ, Worklist);
3715}
3716
3719 while (!Worklist.empty()) {
3720 BasicBlock *BB = Worklist.pop_back_val();
3721 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3722 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3723 }))
3724 continue;
3725
3727 }
3728}
3729
3731 BasicBlock *LiveSucc) {
3733 for (BasicBlock *Succ : successors(BB)) {
3734 // The live successor isn't dead.
3735 if (Succ == LiveSucc)
3736 continue;
3737
3738 addDeadEdge(BB, Succ, Worklist);
3739 }
3740
3742}
3743
3745 if (BI.isUnconditional())
3747
3748 // Change br (not X), label True, label False to: br X, label False, True
3749 Value *Cond = BI.getCondition();
3750 Value *X;
3751 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3752 // Swap Destinations and condition...
3753 BI.swapSuccessors();
3754 if (BPI)
3756 return replaceOperand(BI, 0, X);
3757 }
3758
3759 // Canonicalize logical-and-with-invert as logical-or-with-invert.
3760 // This is done by inverting the condition and swapping successors:
3761 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3762 Value *Y;
3763 if (isa<SelectInst>(Cond) &&
3764 match(Cond,
3766 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3767 Value *Or = Builder.CreateLogicalOr(NotX, Y);
3768 BI.swapSuccessors();
3769 if (BPI)
3771 return replaceOperand(BI, 0, Or);
3772 }
3773
3774 // If the condition is irrelevant, remove the use so that other
3775 // transforms on the condition become more effective.
3776 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3777 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3778
3779 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3780 CmpPredicate Pred;
3781 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3782 !isCanonicalPredicate(Pred)) {
3783 // Swap destinations and condition.
3784 auto *Cmp = cast<CmpInst>(Cond);
3785 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3786 BI.swapSuccessors();
3787 if (BPI)
3789 Worklist.push(Cmp);
3790 return &BI;
3791 }
3792
3793 if (isa<UndefValue>(Cond)) {
3794 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3795 return nullptr;
3796 }
3797 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3799 BI.getSuccessor(!CI->getZExtValue()));
3800 return nullptr;
3801 }
3802
3803 // Replace all dominated uses of the condition with true/false
3804 // Ignore constant expressions to avoid iterating over uses on other
3805 // functions.
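// Illustrative example (not from the original source): given
//   br i1 %c, label %t, label %f
// a use of %c dominated by the edge to %t can be replaced with 'true', and a
// use dominated by the edge to %f with 'false'.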
3806 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
3807 for (auto &U : make_early_inc_range(Cond->uses())) {
3808 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
3809 if (DT.dominates(Edge0, U)) {
3810 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
3811 addToWorklist(cast<Instruction>(U.getUser()));
3812 continue;
3813 }
3814 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
3815 if (DT.dominates(Edge1, U)) {
3816 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
3817 addToWorklist(cast<Instruction>(U.getUser()));
3818 }
3819 }
3820 }
3821
3822 DC.registerBranch(&BI);
3823 return nullptr;
3824}
3825
3826// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
3827// we can prove that both (switch C) and (switch X) go to the default when cond
3828// is false/true.
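// Illustrative example (not from the original source):
//   %cmp = icmp ult i32 %x, 4
//   %sel = select i1 %cmp, i32 100, i32 %x
//   switch i32 %sel, label %default [ ... ]
// If 100 lands on %default and every case value is >= 4, the switch can use
// %x directly: whenever %cmp is true, %x is < 4 and also reaches %default.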
3831 bool IsTrueArm) {
3832 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3833 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
3834 if (!C)
3835 return nullptr;
3836
3837 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
3838 if (CstBB != SI.getDefaultDest())
3839 return nullptr;
3840 Value *X = Select->getOperand(3 - CstOpIdx);
3841 CmpPredicate Pred;
3842 const APInt *RHSC;
3843 if (!match(Select->getCondition(),
3844 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
3845 return nullptr;
3846 if (IsTrueArm)
3847 Pred = ICmpInst::getInversePredicate(Pred);
3848
3849 // See whether we can replace the select with X
3851 for (auto Case : SI.cases())
3852 if (!CR.contains(Case.getCaseValue()->getValue()))
3853 return nullptr;
3854
3855 return X;
3856}
3857
3859 Value *Cond = SI.getCondition();
3860 Value *Op0;
3861 ConstantInt *AddRHS;
3862 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3863 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3864 for (auto Case : SI.cases()) {
3865 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3866 assert(isa<ConstantInt>(NewCase) &&
3867 "Result of expression should be constant");
3868 Case.setValue(cast<ConstantInt>(NewCase));
3869 }
3870 return replaceOperand(SI, 0, Op0);
3871 }
3872
3873 ConstantInt *SubLHS;
3874 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
3875 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
3876 for (auto Case : SI.cases()) {
3877 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
3878 assert(isa<ConstantInt>(NewCase) &&
3879 "Result of expression should be constant");
3880 Case.setValue(cast<ConstantInt>(NewCase));
3881 }
3882 return replaceOperand(SI, 0, Op0);
3883 }
3884
3885 uint64_t ShiftAmt;
3886 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
3887 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
3888 all_of(SI.cases(), [&](const auto &Case) {
3889 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3890 })) {
3891 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
3892 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
3893 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
3894 Shl->hasOneUse()) {
3895 Value *NewCond = Op0;
3896 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
3897 // If the shift may wrap, we need to mask off the shifted bits.
3898 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
3899 NewCond = Builder.CreateAnd(
3900 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
3901 }
3902 for (auto Case : SI.cases()) {
3903 const APInt &CaseVal = Case.getCaseValue()->getValue();
3904 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
3905 : CaseVal.lshr(ShiftAmt);
3906 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3907 }
3908 return replaceOperand(SI, 0, NewCond);
3909 }
3910 }
3911
3912 // Fold switch(zext/sext(X)) into switch(X) if possible.
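// Illustrative example (not from the original source): 'switch i64 %w' where
// %w = zext i32 %x and every case value fits in 32 bits can instead switch on
// %x with truncated case values.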
3913 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
3914 bool IsZExt = isa<ZExtInst>(Cond);
3915 Type *SrcTy = Op0->getType();
3916 unsigned NewWidth = SrcTy->getScalarSizeInBits();
3917
3918 if (all_of(SI.cases(), [&](const auto &Case) {
3919 const APInt &CaseVal = Case.getCaseValue()->getValue();
3920 return IsZExt ? CaseVal.isIntN(NewWidth)
3921 : CaseVal.isSignedIntN(NewWidth);
3922 })) {
3923 for (auto &Case : SI.cases()) {
3924 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3925 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3926 }
3927 return replaceOperand(SI, 0, Op0);
3928 }
3929 }
3930
3931 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
3932 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
3933 if (Value *V =
3934 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
3935 return replaceOperand(SI, 0, V);
3936 if (Value *V =
3937 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
3938 return replaceOperand(SI, 0, V);
3939 }
3940
3941 KnownBits Known = computeKnownBits(Cond, 0, &SI);
3942 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3943 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3944
3945 // Compute the number of leading bits we can ignore.
3946 // TODO: A better way to determine this would use ComputeNumSignBits().
3947 for (const auto &C : SI.cases()) {
3948 LeadingKnownZeros =
3949 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3950 LeadingKnownOnes =
3951 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3952 }
3953
3954 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3955
3956 // Shrink the condition operand if the new type is smaller than the old type.
3957 // But do not shrink to a non-standard type, because the backend can't generate
3958 // good code for that yet.
3959 // TODO: We can make it aggressive again after fixing PR39569.
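// Illustrative example (not from the original source): a switch on i64 %x
// whose case values and known leading bits all fit in 32 bits can switch on
// 'trunc i64 %x to i32' with correspondingly truncated case values.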
3960 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3961 shouldChangeType(Known.getBitWidth(), NewWidth)) {
3962 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3964 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3965
3966 for (auto Case : SI.cases()) {
3967 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3968 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3969 }
3970 return replaceOperand(SI, 0, NewCond);
3971 }
3972
3973 if (isa<UndefValue>(Cond)) {
3974 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3975 return nullptr;
3976 }
3977 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3978 handlePotentiallyDeadSuccessors(SI.getParent(),
3979 SI.findCaseValue(CI)->getCaseSuccessor());
3980 return nullptr;
3981 }
3982
3983 return nullptr;
3984}
3985
3987InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3988 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3989 if (!WO)
3990 return nullptr;
3991
3992 Intrinsic::ID OvID = WO->getIntrinsicID();
3993 const APInt *C = nullptr;
3994 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
3995 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3996 OvID == Intrinsic::umul_with_overflow)) {
3997 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3998 if (C->isAllOnes())
3999 return BinaryOperator::CreateNeg(WO->getLHS());
4000 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4001 if (C->isPowerOf2()) {
4002 return BinaryOperator::CreateShl(
4003 WO->getLHS(),
4004 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4005 }
4006 }
4007 }
4008
4009 // We're extracting from an overflow intrinsic. See if we're the only user.
4010 // That allows us to simplify multiple result intrinsics to simpler things
4011 // that just get one value.
4012 if (!WO->hasOneUse())
4013 return nullptr;
4014
4015 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4016 // and replace it with a traditional binary instruction.
4017 if (*EV.idx_begin() == 0) {
4018 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4019 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4020 // Replace the old instruction's uses with poison.
4021 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4023 return BinaryOperator::Create(BinOp, LHS, RHS);
4024 }
4025
4026 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4027
4028 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
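// Illustrative example (not from the original source):
//   %r = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %ov = extractvalue { i32, i1 } %r, 1
// becomes '%ov = icmp ult i32 %a, %b'.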
4029 if (OvID == Intrinsic::usub_with_overflow)
4030 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4031
4032 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4033 // +1 is not possible because we assume signed values.
4034 if (OvID == Intrinsic::smul_with_overflow &&
4035 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4036 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4037
4038 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4039 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4040 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4041 // Only handle even bitwidths for performance reasons.
4042 if (BitWidth % 2 == 0)
4043 return new ICmpInst(
4044 ICmpInst::ICMP_UGT, WO->getLHS(),
4045 ConstantInt::get(WO->getLHS()->getType(),
4047 }
4048
4049 // If only the overflow result is used, and the right hand side is a
4050 // constant (or constant splat), we can remove the intrinsic by directly
4051 // checking for overflow.
4052 if (C) {
4053 // Compute the no-wrap range for LHS given RHS=C, then construct an
4054 // equivalent icmp, potentially using an offset.
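// Illustrative example (not from the original source): for
//   extractvalue (sadd_with_overflow i8 %x, 1), 1
// the addition stays in range for %x in [-128, 127), so the overflow bit is
// equivalent to 'icmp eq i8 %x, 127'.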
4056 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4057
4058 CmpInst::Predicate Pred;
4059 APInt NewRHSC, Offset;
4060 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4061 auto *OpTy = WO->getRHS()->getType();
4062 auto *NewLHS = WO->getLHS();
4063 if (Offset != 0)
4064 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4065 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4066 ConstantInt::get(OpTy, NewRHSC));
4067 }
4068
4069 return nullptr;
4070}
4071
4073 Value *Agg = EV.getAggregateOperand();
4074
4075 if (!EV.hasIndices())
4076 return replaceInstUsesWith(EV, Agg);
4077
4078 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4079 SQ.getWithInstruction(&EV)))
4080 return replaceInstUsesWith(EV, V);
4081
4082 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4083 // We're extracting from an insertvalue instruction, compare the indices
4084 const unsigned *exti, *exte, *insi, *inse;
4085 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4086 exte = EV.idx_end(), inse = IV->idx_end();
4087 exti != exte && insi != inse;
4088 ++exti, ++insi) {
4089 if (*insi != *exti)
4090 // The insert and extract both reference distinctly different elements.
4091 // This means the extract is not influenced by the insert, and we can
4092 // replace the aggregate operand of the extract with the aggregate
4093 // operand of the insert. i.e., replace
4094 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4095 // %E = extractvalue { i32, { i32 } } %I, 0
4096 // with
4097 // %E = extractvalue { i32, { i32 } } %A, 0
4098 return ExtractValueInst::Create(IV->getAggregateOperand(),
4099 EV.getIndices());
4100 }
4101 if (exti == exte && insi == inse)
4102 // Both iterators are at the end: Index lists are identical. Replace
4103 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4104 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4105 // with "i32 42"
4106 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4107 if (exti == exte) {
4108 // The extract list is a prefix of the insert list. i.e. replace
4109 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4110 // %E = extractvalue { i32, { i32 } } %I, 1
4111 // with
4112 // %X = extractvalue { i32, { i32 } } %A, 1
4113 // %E = insertvalue { i32 } %X, i32 42, 0
4114 // by switching the order of the insert and extract (though the
4115 // insertvalue should be left in, since it may have other uses).
4116 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4117 EV.getIndices());
4118 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4119 ArrayRef(insi, inse));
4120 }
4121 if (insi == inse)
4122 // The insert list is a prefix of the extract list
4123 // We can simply remove the common indices from the extract and make it
4124 // operate on the inserted value instead of the insertvalue result.
4125 // i.e., replace
4126 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4127 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4128 // with
4129 // %E extractvalue { i32 } { i32 42 }, 0
4130 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4131 ArrayRef(exti, exte));
4132 }
4133
4134 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4135 return R;
4136
4137 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4138 // Bail out if the aggregate contains scalable vector type
4139 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4140 STy && STy->isScalableTy())
4141 return nullptr;
4142
4143 // If the (non-volatile) load only has one use, we can rewrite this to a
4144 // load from a GEP. This reduces the size of the load. If a load is used
4145 // only by extractvalue instructions then this either must have been
4146 // optimized before, or it is a struct with padding, in which case we
4147 // don't want to do the transformation as it loses padding knowledge.
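// Illustrative sketch (not from the original source), for a simple single-use
// load:
//   %agg = load { i32, i32 }, ptr %p
//   %v = extractvalue { i32, i32 } %agg, 1
// becomes
//   %gep = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   %v = load i32, ptr %gep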
4148 if (L->isSimple() && L->hasOneUse()) {
4149 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4150 SmallVector<Value*, 4> Indices;
4151 // Prefix an i32 0 since we need the first element.
4152 Indices.push_back(Builder.getInt32(0));
4153 for (unsigned Idx : EV.indices())
4154 Indices.push_back(Builder.getInt32(Idx));
4155
4156 // We need to insert these at the location of the old load, not at that of
4157 // the extractvalue.
4159 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4160 L->getPointerOperand(), Indices);
4162 // Whatever aliasing information we had for the original load must also
4163 // hold for the smaller load, so propagate the annotations.
4164 NL->setAAMetadata(L->getAAMetadata());
4165 // Returning the load directly will cause the main loop to insert it in
4166 // the wrong spot, so use replaceInstUsesWith().
4167 return replaceInstUsesWith(EV, NL);
4168 }
4169 }
4170
4171 if (auto *PN = dyn_cast<PHINode>(Agg))
4172 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4173 return Res;
4174
4175 // Canonicalize extract (select Cond, TV, FV)
4176 // -> select cond, (extract TV), (extract FV)
4177 if (auto *SI = dyn_cast<SelectInst>(Agg))
4178 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4179 return R;
4180
4181 // We could simplify extracts from other values. Note that nested extracts may
4182 // already be simplified implicitly by the above: extract (extract (insert) )
4183 // will be translated into extract ( insert ( extract ) ) first and then just
4184 // the value inserted, if appropriate. Similarly for extracts from single-use
4185 // loads: extract (extract (load)) will be translated to extract (load (gep))
4186 // and if again single-use then via load (gep (gep)) to load (gep).
4187 // However, double extracts from e.g. function arguments or return values
4188 // aren't handled yet.
4189 return nullptr;
4190}
4191
4192/// Return 'true' if the given typeinfo will match anything.
4193static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4194 switch (Personality) {
4198 // The GCC C EH and Rust personalities only exist to support cleanups, so
4199 // it's not clear what the semantics of catch clauses are.
4200 return false;
4202 return false;
4204 // While __gnat_all_others_value will match any Ada exception, it doesn't
4205 // match foreign exceptions (or didn't, before gcc-4.7).
4206 return false;
4217 return TypeInfo->isNullValue();
4218 }
4219 llvm_unreachable("invalid enum");
4220}
4221
4222static bool shorter_filter(const Value *LHS, const Value *RHS) {
4223 return cast<ArrayType>(LHS->getType())->getNumElements() <
4224 cast<ArrayType>(RHS->getType())->getNumElements();
4227}
4228
4230 // The logic here should be correct for any real-world personality function.
4231 // However if that turns out not to be true, the offending logic can always
4232 // be conditioned on the personality function, like the catch-all logic is.
4233 EHPersonality Personality =
4234 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4235
4236 // Simplify the list of clauses, eg by removing repeated catch clauses
4237 // (these are often created by inlining).
4238 bool MakeNewInstruction = false; // If true, recreate using the following:
4239 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4240 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4241
4242 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4243 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4244 bool isLastClause = i + 1 == e;
4245 if (LI.isCatch(i)) {
4246 // A catch clause.
4247 Constant *CatchClause = LI.getClause(i);
4248 Constant *TypeInfo = CatchClause->stripPointerCasts();
4249
4250 // If we already saw this clause, there is no point in having a second
4251 // copy of it.
4252 if (AlreadyCaught.insert(TypeInfo).second) {
4253 // This catch clause was not already seen.
4254 NewClauses.push_back(CatchClause);
4255 } else {
4256 // Repeated catch clause - drop the redundant copy.
4257 MakeNewInstruction = true;
4258 }
4259
4260 // If this is a catch-all then there is no point in keeping any following
4261 // clauses or marking the landingpad as having a cleanup.
4262 if (isCatchAll(Personality, TypeInfo)) {
4263 if (!isLastClause)
4264 MakeNewInstruction = true;
4265 CleanupFlag = false;
4266 break;
4267 }
4268 } else {
4269 // A filter clause. If any of the filter elements were already caught
4270 // then they can be dropped from the filter. It is tempting to try to
4271 // exploit the filter further by saying that any typeinfo that does not
4272 // occur in the filter can't be caught later (and thus can be dropped).
4273 // However this would be wrong, since typeinfos can match without being
4274 // equal (for example if one represents a C++ class, and the other some
4275 // class derived from it).
4276 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4277 Constant *FilterClause = LI.getClause(i);
4278 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4279 unsigned NumTypeInfos = FilterType->getNumElements();
4280
4281 // An empty filter catches everything, so there is no point in keeping any
4282 // following clauses or marking the landingpad as having a cleanup. By
4283 // dealing with this case here the following code is made a bit simpler.
4284 if (!NumTypeInfos) {
4285 NewClauses.push_back(FilterClause);
4286 if (!isLastClause)
4287 MakeNewInstruction = true;
4288 CleanupFlag = false;
4289 break;
4290 }
4291
4292 bool MakeNewFilter = false; // If true, make a new filter.
4293 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4294 if (isa<ConstantAggregateZero>(FilterClause)) {
4295 // Not an empty filter - it contains at least one null typeinfo.
4296 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4297 Constant *TypeInfo =
4299 // If this typeinfo is a catch-all then the filter can never match.
4300 if (isCatchAll(Personality, TypeInfo)) {
4301 // Throw the filter away.
4302 MakeNewInstruction = true;
4303 continue;
4304 }
4305
4306 // There is no point in having multiple copies of this typeinfo, so
4307 // discard all but the first copy if there is more than one.
4308 NewFilterElts.push_back(TypeInfo);
4309 if (NumTypeInfos > 1)
4310 MakeNewFilter = true;
4311 } else {
4312 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4313 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4314 NewFilterElts.reserve(NumTypeInfos);
4315
4316 // Remove any filter elements that were already caught or that already
4317 // occurred in the filter. While there, see if any of the elements are
4318 // catch-alls. If so, the filter can be discarded.
4319 bool SawCatchAll = false;
4320 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4321 Constant *Elt = Filter->getOperand(j);
4322 Constant *TypeInfo = Elt->stripPointerCasts();
4323 if (isCatchAll(Personality, TypeInfo)) {
4324 // This element is a catch-all. Bail out, noting this fact.
4325 SawCatchAll = true;
4326 break;
4327 }
4328
4329 // Even if we've seen a type in a catch clause, we don't want to
4330 // remove it from the filter. An unexpected type handler may be
4331 // set up for a call site which throws an exception of the same
4332 // type caught. In order for the exception thrown by the unexpected
4333 // handler to propagate correctly, the filter must be correctly
4334 // described for the call site.
4335 //
4336 // Example:
4337 //
4338 // void unexpected() { throw 1;}
4339 // void foo() throw (int) {
4340 // std::set_unexpected(unexpected);
4341 // try {
4342 // throw 2.0;
4343 // } catch (int i) {}
4344 // }
4345
4346 // There is no point in having multiple copies of the same typeinfo in
4347 // a filter, so only add it if we didn't already.
4348 if (SeenInFilter.insert(TypeInfo).second)
4349 NewFilterElts.push_back(cast<Constant>(Elt));
4350 }
4351 // A filter containing a catch-all cannot match anything by definition.
4352 if (SawCatchAll) {
4353 // Throw the filter away.
4354 MakeNewInstruction = true;
4355 continue;
4356 }
4357
4358 // If we dropped something from the filter, make a new one.
4359 if (NewFilterElts.size() < NumTypeInfos)
4360 MakeNewFilter = true;
4361 }
4362 if (MakeNewFilter) {
4363 FilterType = ArrayType::get(FilterType->getElementType(),
4364 NewFilterElts.size());
4365 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4366 MakeNewInstruction = true;
4367 }
4368
4369 NewClauses.push_back(FilterClause);
4370
4371 // If the new filter is empty then it will catch everything so there is
4372 // no point in keeping any following clauses or marking the landingpad
4373 // as having a cleanup. The case of the original filter being empty was
4374 // already handled above.
4375 if (MakeNewFilter && !NewFilterElts.size()) {
4376 assert(MakeNewInstruction && "New filter but not a new instruction!");
4377 CleanupFlag = false;
4378 break;
4379 }
4380 }
4381 }
4382
4383 // If several filters occur in a row then reorder them so that the shortest
4384 // filters come first (those with the smallest number of elements). This is
4385 // advantageous because shorter filters are more likely to match, speeding up
4386 // unwinding, but mostly because it increases the effectiveness of the other
4387 // filter optimizations below.
4388 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4389 unsigned j;
4390 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4391 for (j = i; j != e; ++j)
4392 if (!isa<ArrayType>(NewClauses[j]->getType()))
4393 break;
4394
4395 // Check whether the filters are already sorted by length. We need to know
4396 // if sorting them is actually going to do anything so that we only make a
4397 // new landingpad instruction if it does.
4398 for (unsigned k = i; k + 1 < j; ++k)
4399 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4400 // Not sorted, so sort the filters now. Doing an unstable sort would be
4401 // correct too but reordering filters pointlessly might confuse users.
4402 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4404 MakeNewInstruction = true;
4405 break;
4406 }
4407
4408 // Look for the next batch of filters.
4409 i = j + 1;
4410 }
4411
4412 // If typeinfos matched if and only if equal, then the elements of a filter L
4413 // that occurs later than a filter F could be replaced by the intersection of
4414 // the elements of F and L. In reality two typeinfos can match without being
4415 // equal (for example if one represents a C++ class, and the other some class
4416 // derived from it) so it would be wrong to perform this transform in general.
4417 // However the transform is correct and useful if F is a subset of L. In that
4418 // case L can be replaced by F, and thus removed altogether since repeating a
4419 // filter is pointless. So here we look at all pairs of filters F and L where
4420 // L follows F in the list of clauses, and remove L if every element of F is
4421 // an element of L. This can occur when inlining C++ functions with exception
4422 // specifications.
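// Illustrative example (not from the original source): if filter [A] appears
// before filter [A, B], every element of the earlier filter is also in the
// later one, so the later filter would just repeat the earlier one and can be
// dropped.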
4423 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4424 // Examine each filter in turn.
4425 Value *Filter = NewClauses[i];
4426 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4427 if (!FTy)
4428 // Not a filter - skip it.
4429 continue;
4430 unsigned FElts = FTy->getNumElements();
4431 // Examine each filter following this one. Doing this backwards means that
4432 // we don't have to worry about filters disappearing under us when removed.
4433 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4434 Value *LFilter = NewClauses[j];
4435 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4436 if (!LTy)
4437 // Not a filter - skip it.
4438 continue;
4439 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4440 // an element of LFilter, then discard LFilter.
4441 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4442 // If Filter is empty then it is a subset of LFilter.
4443 if (!FElts) {
4444 // Discard LFilter.
4445 NewClauses.erase(J);
4446 MakeNewInstruction = true;
4447 // Move on to the next filter.
4448 continue;
4449 }
4450 unsigned LElts = LTy->getNumElements();
4451 // If Filter is longer than LFilter then it cannot be a subset of it.
4452 if (FElts > LElts)
4453 // Move on to the next filter.
4454 continue;
4455 // At this point we know that LFilter has at least one element.
4456 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4457 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4458 // already know that Filter is not longer than LFilter).
4459 if (isa<ConstantAggregateZero>(Filter)) {
4460 assert(FElts <= LElts && "Should have handled this case earlier!");
4461 // Discard LFilter.
4462 NewClauses.erase(J);
4463 MakeNewInstruction = true;
4464 }
4465 // Move on to the next filter.
4466 continue;
4467 }
4468 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4469 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4470 // Since Filter is non-empty and contains only zeros, it is a subset of
4471 // LFilter iff LFilter contains a zero.
4472 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4473 for (unsigned l = 0; l != LElts; ++l)
4474 if (LArray->getOperand(l)->isNullValue()) {
4475 // LFilter contains a zero - discard it.
4476 NewClauses.erase(J);
4477 MakeNewInstruction = true;
4478 break;
4479 }
4480 // Move on to the next filter.
4481 continue;
4482 }
4483 // At this point we know that both filters are ConstantArrays. Loop over
4484 // operands to see whether every element of Filter is also an element of
4485 // LFilter. Since filters tend to be short this is probably faster than
4486 // using a method that scales nicely.
4487 ConstantArray *FArray = cast<ConstantArray>(Filter);
4488 bool AllFound = true;
4489 for (unsigned f = 0; f != FElts; ++f) {
4490 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4491 AllFound = false;
4492 for (unsigned l = 0; l != LElts; ++l) {
4493 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4494 if (LTypeInfo == FTypeInfo) {
4495 AllFound = true;
4496 break;
4497 }
4498 }
4499 if (!AllFound)
4500 break;
4501 }
4502 if (AllFound) {
4503 // Discard LFilter.
4504 NewClauses.erase(J);
4505 MakeNewInstruction = true;
4506 }
4507 // Move on to the next filter.
4508 }
4509 }
4510
4511 // If we changed any of the clauses, replace the old landingpad instruction
4512 // with a new one.
4513 if (MakeNewInstruction) {
4515 NewClauses.size());
4516 for (Constant *C : NewClauses)
4517 NLI->addClause(C);
4518 // A landing pad with no clauses must have the cleanup flag set. It is
4519 // theoretically possible, though highly unlikely, that we eliminated all
4520 // clauses. If so, force the cleanup flag to true.
4521 if (NewClauses.empty())
4522 CleanupFlag = true;
4523 NLI->setCleanup(CleanupFlag);
4524 return NLI;
4525 }
4526
4527 // Even if none of the clauses changed, we may nonetheless have understood
4528 // that the cleanup flag is pointless. Clear it if so.
4529 if (LI.isCleanup() != CleanupFlag) {
4530 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4531 LI.setCleanup(CleanupFlag);
4532 return &LI;
4533 }
4534
4535 return nullptr;
4536}
4537
4538Value *
4540 // Try to push freeze through instructions that propagate but don't produce
4541 // poison as far as possible. If an operand of freeze satisfies three
4542 // conditions: 1) it has one use, 2) it does not produce poison, and 3) all but
4543 // one of its operands are guaranteed non-poison, then push the freeze through
4544 // to the one operand that is not guaranteed non-poison. The actual transform is as
4545 // follows.
4546 // Op1 = ... ; Op1 can be poison
4547 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and only a
4548 // ; single maybe-poison operand (Op1)
4549 // ... = Freeze(Op0)
4550 // =>
4551 // Op1 = ...
4552 // Op1.fr = Freeze(Op1)
4553 // ... = Inst(Op1.fr, NonPoisonOps...)
4554 auto *OrigOp = OrigFI.getOperand(0);
4555 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4556
4557 // While we could change the other users of OrigOp to use freeze(OrigOp), that
4558 // potentially reduces their optimization potential, so let's only do this iff
4559 // the OrigOp is only used by the freeze.
4560 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4561 return nullptr;
4562
4563 // We can't push the freeze through an instruction which can itself create
4564 // poison. If the only source of new poison is flags, we can simply
4565 // strip them (since we know the only use is the freeze and nothing can
4566 // benefit from them.)
4567 if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
4568 /*ConsiderFlagsAndMetadata*/ false))
4569 return nullptr;
4570
4571 // If operand is guaranteed not to be poison, there is no need to add freeze
4572 // to the operand. So we first find the operand that is not guaranteed to be
4573 // non-poison.
4574 Use *MaybePoisonOperand = nullptr;
4575 for (Use &U : OrigOpInst->operands()) {
4576 if (isa<MetadataAsValue>(U.get()) ||
4578 continue;
4579 if (!MaybePoisonOperand)
4580 MaybePoisonOperand = &U;
4581 else
4582 return nullptr;
4583 }
4584
4585 OrigOpInst->dropPoisonGeneratingAnnotations();
4586
4587 // If all operands are guaranteed to be non-poison, we can drop freeze.
4588 if (!MaybePoisonOperand)
4589 return OrigOp;
4590
4591 Builder.SetInsertPoint(OrigOpInst);
4592 auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
4593 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
4594
4595 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4596 return OrigOp;
4597}
4598
4600 PHINode *PN) {
4601 // Detect whether this is a recurrence with a start value and some number of
4602 // backedge values. We'll check whether we can push the freeze through the
4603 // backedge values (possibly dropping poison flags along the way) until we
4604 // reach the phi again. In that case, we can move the freeze to the start
4605 // value.
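// Illustrative sketch (not from the original source):
//   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.fr = freeze i32 %iv
// can become
//   %start.fr = freeze i32 %start
//   %iv = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
// provided the backedge values cannot introduce new poison once their
// poison-generating flags are dropped.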
4606 Use *StartU = nullptr;
4608 for (Use &U : PN->incoming_values()) {
4609 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
4610 // Add backedge value to worklist.
4611 Worklist.push_back(U.get());
4612 continue;
4613 }
4614
4615 // Don't bother handling multiple start values.
4616 if (StartU)
4617 return nullptr;
4618 StartU = &U;
4619 }
4620
4621 if (!StartU || Worklist.empty())
4622 return nullptr; // Not a recurrence.
4623
4624 Value *StartV = StartU->get();
4625 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
4626 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
4627 // We can't insert freeze if the start value is the result of the
4628 // terminator (e.g. an invoke).
4629 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
4630 return nullptr;
4631
4634 while (!Worklist.empty()) {
4635 Value *V = Worklist.pop_back_val();
4636 if (!Visited.insert(V).second)
4637 continue;
4638
4639 if (Visited.size() > 32)
4640 return nullptr; // Limit the total number of values we inspect.
4641
4642 // Assume that PN is non-poison, because it will be after the transform.
4643 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
4644 continue;
4645
4646 Instruction *I = dyn_cast<Instruction>(V);
4647 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
4648 /*ConsiderFlagsAndMetadata*/ false))
4649 return nullptr;
4650
4651 DropFlags.push_back(I);
4652 append_range(Worklist, I->operands());
4653 }
4654
4655 for (Instruction *I : DropFlags)
4656 I->dropPoisonGeneratingAnnotations();
4657
4658 if (StartNeedsFreeze) {
4660 Value *FrozenStartV = Builder.CreateFreeze(StartV,
4661 StartV->getName() + ".fr");
4662 replaceUse(*StartU, FrozenStartV);
4663 }
4664 return replaceInstUsesWith(FI, PN);
4665}
4666
4668 Value *Op = FI.getOperand(0);
4669
4670 if (isa<Constant>(Op) || Op->hasOneUse())
4671 return false;
4672
4673 // Move the freeze directly after the definition of its operand, so that
4674 // it dominates the maximum number of uses. Note that it may not dominate
4675 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
4676 // the normal/default destination. This is why the domination check in the
4677 // replacement below is still necessary.
4678 BasicBlock::iterator MoveBefore;
4679 if (isa<Argument>(Op)) {
4680 MoveBefore =
4682 } else {
4683 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
4684 if (!MoveBeforeOpt)
4685 return false;
4686 MoveBefore = *MoveBeforeOpt;
4687 }
4688
4689 // Don't move to the position of a debug intrinsic.
4690 if (isa<DbgInfoIntrinsic>(MoveBefore))
4691 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4692 // Re-point iterator to come after any debug-info records, if we're
4693 // running in "RemoveDIs" mode
4694 MoveBefore.setHeadBit(false);
4695
4696 bool Changed = false;
4697 if (&FI != &*MoveBefore) {
4698 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
4699 Changed = true;
4700 }
4701
4702 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
4703 bool Dominates = DT.dominates(&FI, U);
4704 Changed |= Dominates;
4705 return Dominates;
4706 });
4707
4708 return Changed;
4709}
4710
4711// Check if any direct or bitcast user of this value is a shuffle instruction.
4712 static bool isUsedWithinShuffleVector(Value *V) {
4713 for (auto *U : V->users()) {
4714 if (isa<ShuffleVectorInst>(U))
4715 return true;
4716 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
4717 return true;
4718 }
4719 return false;
4720}
4721
4722 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
4723 Value *Op0 = I.getOperand(0);
4724
4725 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
4726 return replaceInstUsesWith(I, V);
4727
4728 // freeze (phi const, x) --> phi const, (freeze x)
4729 if (auto *PN = dyn_cast<PHINode>(Op0)) {
4730 if (Instruction *NV = foldOpIntoPhi(I, PN))
4731 return NV;
4732 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
4733 return NV;
4734 }
4735
4736 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
4737 return replaceInstUsesWith(I, NI);
4738
4739 // If I is freeze(undef), check its uses and fold it to a fixed constant.
4740 // - or: pick -1
4741 // - select's condition: if the true value is constant, choose it by making
4742 // the condition true.
4743 // - default: pick 0
4744 //
4745 // Note that this transform is intentionally done here rather than
4746 // via an analysis in InstSimplify or at individual user sites. That is
4747 // because we must produce the same value for all uses of the freeze -
4748 // it's the reason "freeze" exists!
4749 //
4750 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
4751 // duplicating logic for binops at least.
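// Illustrative sketches (not from the original source; hypothetical IR):
//   %f = freeze i32 undef   ; only user: %o = or i32 %v, %f
// is replaced by i32 -1 (the 'or' absorber), while
//   %c = freeze i1 undef    ; only user: %s = select i1 %c, i32 7, i32 %v
// is replaced by i1 true (selecting the constant arm). If different users
// would prefer different constants, the zero value is used for all of them.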
4752 auto getUndefReplacement = [&I](Type *Ty) {
4753 Constant *BestValue = nullptr;
4754 Constant *NullValue = Constant::getNullValue(Ty);
4755 for (const auto *U : I.users()) {
4756 Constant *C = NullValue;
4757 if (match(U, m_Or(m_Value(), m_Value())))
4758 C = ConstantInt::getAllOnesValue(Ty);
4759 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4760 C = ConstantInt::getTrue(Ty);
4761
4762 if (!BestValue)
4763 BestValue = C;
4764 else if (BestValue != C)
4765 BestValue = NullValue;
4766 }
4767 assert(BestValue && "Must have at least one use");
4768 return BestValue;
4769 };
4770
4771 if (match(Op0, m_Undef())) {
4772 // Don't fold freeze(undef/poison) if it's used as a vector operand in
4773 // a shuffle. This may improve codegen for shuffles that allow
4774 // unspecified inputs.
4775 if (isUsedWithinShuffleVector(&I))
4776 return nullptr;
4777 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4778 }
4779
4780 Constant *C;
4781 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4782 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
4783 return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
4784 }
4785
4786 // Replace uses of Op with freeze(Op).
4787 if (freezeOtherUses(I))
4788 return &I;
4789
4790 return nullptr;
4791}
4792
4793/// Check for case where the call writes to an otherwise dead alloca. This
4794/// shows up for unused out-params in idiomatic C/C++ code. Note that this
4795/// helper *only* analyzes the write; doesn't check any other legality aspect.
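// Illustrative sketch (not from the original source; hypothetical IR):
//   %unused = alloca i32
//   call void @get_status(ptr %unused)   ; the call only writes to %unused
// With no other users of %unused, the call's write is unobservable, so it
// does not block sinking the call.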
4796 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
4797 auto *CB = dyn_cast<CallBase>(I);
4798 if (!CB)
4799 // TODO: handle e.g. store to alloca here - only worth doing if we extend
4800 // to allow reload along used path as described below. Otherwise, this
4801 // is simply a store to a dead allocation which will be removed.
4802 return false;
4803 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4804 if (!Dest)
4805 return false;
4806 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4807 if (!AI)
4808 // TODO: allow malloc?
4809 return false;
4810 // TODO: allow memory access dominated by move point? Note that since AI
4811 // could have a reference to itself captured by the call, we would need to
4812 // account for cycles in doing so.
4813 SmallVector<const User *> AllocaUsers;
4814 SmallPtrSet<const User *, 4> Visited;
4815 auto pushUsers = [&](const Instruction &I) {
4816 for (const User *U : I.users()) {
4817 if (Visited.insert(U).second)
4818 AllocaUsers.push_back(U);
4819 }
4820 };
4821 pushUsers(*AI);
4822 while (!AllocaUsers.empty()) {
4823 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4824 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4825 pushUsers(*UserI);
4826 continue;
4827 }
4828 if (UserI == CB)
4829 continue;
4830 // TODO: support lifetime.start/end here
4831 return false;
4832 }
4833 return true;
4834}
4835
4836/// Try to move the specified instruction from its current block into the
4837/// beginning of DestBlock, which can only happen if it's safe to move the
4838/// instruction past all of the instructions between it and the end of its
4839/// block.
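// Illustrative sketch (not from the original source; hypothetical IR):
// %v is only used in %then, and %then's unique predecessor is %bb, so %v
// can be sunk and is no longer computed on the %else path:
//   bb:
//     %v = mul i32 %a, %b
//     br i1 %c, label %then, label %else
//   then:
//     %u = add i32 %v, 1
// becomes:
//   bb:
//     br i1 %c, label %then, label %else
//   then:
//     %v = mul i32 %a, %b
//     %u = add i32 %v, 1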
4840 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
4841 BasicBlock *DestBlock) {
4842 BasicBlock *SrcBlock = I->getParent();
4843
4844 // Cannot move control-flow-involving instructions, volatile loads, va_arg, etc.
4845 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4846 I->isTerminator())
4847 return false;
4848
4849 // Do not sink static or dynamic alloca instructions. Static allocas must
4850 // remain in the entry block, and dynamic allocas must not be sunk in between
4851 // a stacksave / stackrestore pair, which would incorrectly shorten its
4852 // lifetime.
4853 if (isa<AllocaInst>(I))
4854 return false;
4855
4856 // Do not sink into catchswitch blocks.
4857 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4858 return false;
4859
4860 // Do not sink convergent call instructions.
4861 if (auto *CI = dyn_cast<CallInst>(I)) {
4862 if (CI->isConvergent())
4863 return false;
4864 }
4865
4866 // Unless we can prove that the memory write isn't visible except on the
4867 // path we're sinking to, we must bail.
4868 if (I->mayWriteToMemory()) {
4869 if (!SoleWriteToDeadLocal(I, TLI))
4870 return false;
4871 }
4872
4873 // We can only sink load instructions if there is nothing between the load and
4874 // the end of the block that could change the value.
4875 if (I->mayReadFromMemory() &&
4876 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
4877 // We don't want to do any sophisticated alias analysis, so we only check
4878 // the instructions after I in I's parent block if we try to sink to its
4879 // successor block.
4880 if (DestBlock->getUniquePredecessor() != I->getParent())
4881 return false;
4882 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4883 E = I->getParent()->end();
4884 Scan != E; ++Scan)
4885 if (Scan->mayWriteToMemory())
4886 return false;
4887 }
4888
4889 I->dropDroppableUses([&](const Use *U) {
4890 auto *I = dyn_cast<Instruction>(U->getUser());
4891 if (I && I->getParent() != DestBlock) {
4892 Worklist.add(I);
4893 return true;
4894 }
4895 return false;
4896 });
4897 /// FIXME: We could remove droppable uses that are not dominated by
4898 /// the new position.
4899
4900 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4901 I->moveBefore(*DestBlock, InsertPos);
4902 ++NumSunkInst;
4903
4904 // Also sink all related debug uses from the source basic block. Otherwise we
4905 // get a debug use before the def. Attempt to salvage debug uses first, to
4906 // maximise the range over which variables have a location. If we cannot salvage, then
4907 // mark the location undef: we know it was supposed to receive a new location
4908 // here, but that computation has been sunk.
4909 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
4910 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
4911 findDbgUsers(DbgUsers, I, &DbgVariableRecords);
4912 if (!DbgUsers.empty())
4913 tryToSinkInstructionDbgValues(I, InsertPos, SrcBlock, DestBlock, DbgUsers);
4914 if (!DbgVariableRecords.empty())
4915 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
4916 DbgVariableRecords);
4917
4918 // PS: there are numerous flaws with this behaviour, not least that right now
4919 // assignments can be re-ordered past other assignments to the same variable
4920 // if they use different Values. Creating more undef assignments can never be
4921 // undone. And salvaging all users outside of this block can unnecessarily
4922 // alter the lifetime of the live value that the variable refers to.
4923 // Some of these things can be resolved by tolerating debug use-before-defs in
4924 // LLVM-IR; however, it depends on the instruction-referencing CodeGen backend
4925 // being used for more architectures.
4926
4927 return true;
4928}
4929
4930 void InstCombinerImpl::tryToSinkInstructionDbgValues(
4931 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4932 BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers) {
4933 // For all debug values in the destination block, the sunk instruction
4934 // will still be available, so they do not need to be dropped.
4935 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
4936 for (auto &DbgUser : DbgUsers)
4937 if (DbgUser->getParent() != DestBlock)
4938 DbgUsersToSalvage.push_back(DbgUser);
4939
4940 // Process the sinking DbgUsersToSalvage in reverse order, as we only want
4941 // to clone the last appearing debug intrinsic for each given variable.
4942 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
4943 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4944 if (DVI->getParent() == SrcBlock)
4945 DbgUsersToSink.push_back(DVI);
4946 llvm::sort(DbgUsersToSink,
4947 [](auto *A, auto *B) { return B->comesBefore(A); });
4948
4949 SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
4950 SmallSet<DebugVariable, 4> SunkVariables;
4951 for (auto *User : DbgUsersToSink) {
4952 // A dbg.declare instruction should not be cloned, since there can only be
4953 // one per variable fragment. It should be left in the original place
4954 // because the sunk instruction is not an alloca (otherwise we could not be
4955 // here).
4956 if (isa<DbgDeclareInst>(User))
4957 continue;
4958
4959 DebugVariable DbgUserVariable =
4960 DebugVariable(User->getVariable(), User->getExpression(),
4961 User->getDebugLoc()->getInlinedAt());
4962
4963 if (!SunkVariables.insert(DbgUserVariable).second)
4964 continue;
4965
4966 // Leave dbg.assign intrinsics in their original positions and there should
4967 // be no need to insert a clone.
4968 if (isa<DbgAssignIntrinsic>(User))
4969 continue;
4970
4971 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
4972 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
4973 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
4974 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
4975 }
4976
4977 // Perform salvaging without the clones, then sink the clones.
4978 if (!DIIClones.empty()) {
4979 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, {});
4980 // The clones are in reverse order of original appearance, reverse again to
4981 // maintain the original order.
4982 for (auto &DIIClone : llvm::reverse(DIIClones)) {
4983 DIIClone->insertBefore(&*InsertPos);
4984 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
4985 }
4986 }
4987}
4988
4989 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
4990 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4991 BasicBlock *DestBlock,
4992 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
4993 // Implementation of tryToSinkInstructionDbgValues, but for the
4994 // DbgVariableRecord of variable assignments rather than dbg.values.
4995
4996 // Fetch all DbgVariableRecords not already in the destination.
4997 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
4998 for (auto &DVR : DbgVariableRecords)
4999 if (DVR->getParent() != DestBlock)
5000 DbgVariableRecordsToSalvage.push_back(DVR);
5001
5002 // Fetch a second collection, of DbgVariableRecords in the source block that
5003 // we're going to sink.
5004 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5005 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5006 if (DVR->getParent() == SrcBlock)
5007 DbgVariableRecordsToSink.push_back(DVR);
5008
5009 // Sort DbgVariableRecords according to their position in the block. This is a
5010 // partial order: DbgVariableRecords attached to different instructions will
5011 // be ordered by the instruction order, but DbgVariableRecords attached to the
5012 // same instruction won't have an order.
5013 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5014 return B->getInstruction()->comesBefore(A->getInstruction());
5015 };
5016 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5017
5018 // If there are two assignments to the same variable attached to the same
5019 // instruction, the ordering between the two assignments is important. Scan
5020 // for this (rare) case and establish which is the last assignment.
5021 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5023 if (DbgVariableRecordsToSink.size() > 1) {
5025 // Count how many assignments to each variable there is per instruction.
5026 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5027 DebugVariable DbgUserVariable =
5028 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5029 DVR->getDebugLoc()->getInlinedAt());
5030 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5031 }
5032
5033 // If there are any instructions with two assignments, add them to the
5034 // FilterOutMap to record that they need extra filtering.
5036 for (auto It : CountMap) {
5037 if (It.second > 1) {
5038 FilterOutMap[It.first] = nullptr;
5039 DupSet.insert(It.first.first);
5040 }
5041 }
5042
5043 // For all instruction/variable pairs needing extra filtering, find the
5044 // latest assignment.
5045 for (const Instruction *Inst : DupSet) {
5046 for (DbgVariableRecord &DVR :
5047 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5048 DebugVariable DbgUserVariable =
5049 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5050 DVR.getDebugLoc()->getInlinedAt());
5051 auto FilterIt =
5052 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5053 if (FilterIt == FilterOutMap.end())
5054 continue;
5055 if (FilterIt->second != nullptr)
5056 continue;
5057 FilterIt->second = &DVR;
5058 }
5059 }
5060 }
5061
5062 // Perform cloning of the DbgVariableRecords that we plan on sinking,
5063 // filtering out any duplicate assignments identified above.
5064 SmallVector<DbgVariableRecord *, 2> DVRClones;
5065 SmallSet<DebugVariable, 4> SunkVariables;
5066 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5068 continue;
5069
5070 DebugVariable DbgUserVariable =
5071 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5072 DVR->getDebugLoc()->getInlinedAt());
5073
5074 // For any variable where there were multiple assignments in the same place,
5075 // ignore all but the last assignment.
5076 if (!FilterOutMap.empty()) {
5077 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5078 auto It = FilterOutMap.find(IVP);
5079
5080 // Filter out.
5081 if (It != FilterOutMap.end() && It->second != DVR)
5082 continue;
5083 }
5084
5085 if (!SunkVariables.insert(DbgUserVariable).second)
5086 continue;
5087
5088 if (DVR->isDbgAssign())
5089 continue;
5090
5091 DVRClones.emplace_back(DVR->clone());
5092 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5093 }
5094
5095 // Perform salvaging without the clones, then sink the clones.
5096 if (DVRClones.empty())
5097 return;
5098
5099 salvageDebugInfoForDbgValues(*I, {}, DbgVariableRecordsToSalvage);
5100
5101 // The clones are in reverse order of original appearance. Assert that the
5102 // head bit is set on the iterator as we _should_ have received it via
5103 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5104 // we'll repeatedly insert at the head, such as:
5105 // DVR-3 (third insertion goes here)
5106 // DVR-2 (second insertion goes here)
5107 // DVR-1 (first insertion goes here)
5108 // Any-Prior-DVRs
5109 // InsertPtInst
5110 assert(InsertPos.getHeadBit());
5111 for (DbgVariableRecord *DVRClone : DVRClones) {
5112 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5113 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5114 }
5115}
5116
5117 bool InstCombinerImpl::run() {
5118 while (!Worklist.isEmpty()) {
5119 // Walk deferred instructions in reverse order, and push them to the
5120 // worklist, which means they'll end up popped from the worklist in-order.
5121 while (Instruction *I = Worklist.popDeferred()) {
5122 // Check to see if we can DCE the instruction. We do this already here to
5123 // reduce the number of uses and thus allow other folds to trigger.
5124 // Note that eraseInstFromFunction() may push additional instructions on
5125 // the deferred worklist, so this will DCE whole instruction chains.
5126 if (isInstructionTriviallyDead(I, &TLI)) {
5127 eraseInstFromFunction(*I);
5128 ++NumDeadInst;
5129 continue;
5130 }
5131
5132 Worklist.push(I);
5133 }
5134
5135 Instruction *I = Worklist.removeOne();
5136 if (I == nullptr) continue; // skip null values.
5137
5138 // Check to see if we can DCE the instruction.
5139 if (isInstructionTriviallyDead(I, &TLI)) {
5140 eraseInstFromFunction(*I);
5141 ++NumDeadInst;
5142 continue;
5143 }
5144
5145 if (!DebugCounter::shouldExecute(VisitCounter))
5146 continue;
5147
5148 // See if we can trivially sink this instruction to its user if we can
5149 // prove that the successor is not executed more frequently than our block.
5150 // Return the UserBlock if successful.
5151 auto getOptionalSinkBlockForInst =
5152 [this](Instruction *I) -> std::optional<BasicBlock *> {
5153 if (!EnableCodeSinking)
5154 return std::nullopt;
5155
5156 BasicBlock *BB = I->getParent();
5157 BasicBlock *UserParent = nullptr;
5158 unsigned NumUsers = 0;
5159
5160 for (Use &U : I->uses()) {
5161 User *User = U.getUser();
5162 if (User->isDroppable())
5163 continue;
5164 if (NumUsers > MaxSinkNumUsers)
5165 return std::nullopt;
5166
5167 Instruction *UserInst = cast<Instruction>(User);
5168 // Special handling for Phi nodes - get the block the use occurs in.
5169 BasicBlock *UserBB = UserInst->getParent();
5170 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5171 UserBB = PN->getIncomingBlock(U);
5172 // Bail out if we have uses in different blocks. We don't do any
5173 // sophisticated analysis (i.e., finding the NearestCommonDominator of these
5174 // use blocks).
5175 if (UserParent && UserParent != UserBB)
5176 return std::nullopt;
5177 UserParent = UserBB;
5178
5179 // Make sure these checks are done only once; naturally, we do them the
5180 // first time we get the user parent, which saves compile time.
5181 if (NumUsers == 0) {
5182 // Try sinking to another block. If that block is unreachable, then do
5183 // not bother. SimplifyCFG should handle it.
5184 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5185 return std::nullopt;
5186
5187 auto *Term = UserParent->getTerminator();
5188 // See if the user is one of our successors that has only one
5189 // predecessor, so that we don't have to split the critical edge.
5190 // Another option where we can sink is a block that ends with a
5191 // terminator that does not pass control to another block (such as
5192 // return or unreachable or resume). In this case:
5193 // - I dominates the User (by SSA form);
5194 // - the User will be executed at most once.
5195 // So sinking I down to User is always profitable or neutral.
5196 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5197 return std::nullopt;
5198
5199 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5200 }
5201
5202 NumUsers++;
5203 }
5204
5205 // No users, or only droppable users.
5206 if (!UserParent)
5207 return std::nullopt;
5208
5209 return UserParent;
5210 };
5211
5212 auto OptBB = getOptionalSinkBlockForInst(I);
5213 if (OptBB) {
5214 auto *UserParent = *OptBB;
5215 // Okay, the CFG is simple enough, try to sink this instruction.
5216 if (tryToSinkInstruction(I, UserParent)) {
5217 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5218 MadeIRChange = true;
5219 // We'll add uses of the sunk instruction below, but since
5220 // sinking can expose opportunities for its *operands*, add
5221 // them to the worklist.
5222 for (Use &U : I->operands())
5223 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5224 Worklist.push(OpI);
5225 }
5226 }
5227
5228 // Now that we have an instruction, try combining it to simplify it.
5229 Builder.SetInsertPoint(I);
5230 Builder.CollectMetadataToCopy(
5231 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5232
5233#ifndef NDEBUG
5234 std::string OrigI;
5235#endif
5236 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5237 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5238
5239 if (Instruction *Result = visit(*I)) {
5240 ++NumCombined;
5241 // Should we replace the old instruction with a new one?
5242 if (Result != I) {
5243 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5244 << " New = " << *Result << '\n');
5245
5246 // We copy the old instruction's DebugLoc to the new instruction, unless
5247 // InstCombine already assigned a DebugLoc to it, in which case we
5248 // should trust the more specifically selected DebugLoc.
5249 if (!Result->getDebugLoc())
5250 Result->setDebugLoc(I->getDebugLoc());
5251 // We also copy annotation metadata to the new instruction.
5252 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5253 // Everything uses the new instruction now.
5254 I->replaceAllUsesWith(Result);
5255
5256 // Move the name to the new instruction first.
5257 Result->takeName(I);
5258
5259 // Insert the new instruction into the basic block...
5260 BasicBlock *InstParent = I->getParent();
5261 BasicBlock::iterator InsertPos = I->getIterator();
5262
5263 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5264 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5265 // We need to fix up the insertion point.
5266 if (isa<PHINode>(I)) // PHI -> Non-PHI
5267 InsertPos = InstParent->getFirstInsertionPt();
5268 else // Non-PHI -> PHI
5269 InsertPos = InstParent->getFirstNonPHIIt();
5270 }
5271
5272 Result->insertInto(InstParent, InsertPos);
5273
5274 // Push the new instruction and any users onto the worklist.
5275 Worklist.pushUsersToWorkList(*Result);
5276 Worklist.push(Result);
5277
5278 eraseInstFromFunction(*I);
5279 } else {
5280 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5281 << " New = " << *I << '\n');
5282
5283 // If the instruction was modified, it's possible that it is now dead.
5284 // If so, remove it.
5285 if (isInstructionTriviallyDead(I, &TLI)) {
5286 eraseInstFromFunction(*I);
5287 } else {
5288 Worklist.pushUsersToWorkList(*I);
5289 Worklist.push(I);
5290 }
5291 }
5292 MadeIRChange = true;
5293 }
5294 }
5295
5296 Worklist.zap();
5297 return MadeIRChange;
5298}
5299
5300// Track the scopes used by !alias.scope and !noalias. In a function, a
5301// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5302// by both sets. If not, the declaration of the scope can be safely omitted.
5303// The MDNode of the scope can be omitted as well for the instructions that are
5304// part of this function. We do not do that at this point, as this might become
5305// too time consuming to do.
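// Illustrative sketch (not from the original source; hypothetical IR): a
// declaration such as
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// is dead if scope !2 is never referenced by both an !alias.scope list and
// a !noalias list somewhere in this function, and can then be removed.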
5306 class AliasScopeTracker {
5307 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5308 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5309
5310public:
5311 void analyse(Instruction *I) {
5312 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5313 if (!I->hasMetadataOtherThanDebugLoc())
5314 return;
5315
5316 auto Track = [](Metadata *ScopeList, auto &Container) {
5317 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5318 if (!MDScopeList || !Container.insert(MDScopeList).second)
5319 return;
5320 for (const auto &MDOperand : MDScopeList->operands())
5321 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5322 Container.insert(MDScope);
5323 };
5324
5325 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5326 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5327 }
5328
5329 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5330 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5331 if (!Decl)
5332 return false;
5333
5334 assert(Decl->use_empty() &&
5335 "llvm.experimental.noalias.scope.decl in use ?");
5336 const MDNode *MDSL = Decl->getScopeList();
5337 assert(MDSL->getNumOperands() == 1 &&
5338 "llvm.experimental.noalias.scope should refer to a single scope");
5339 auto &MDOperand = MDSL->getOperand(0);
5340 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5341 return !UsedAliasScopesAndLists.contains(MD) ||
5342 !UsedNoAliasScopesAndLists.contains(MD);
5343
5344 // Not an MDNode? Throw it away.
5345 return true;
5346 }
5347};
5348
5349/// Populate the IC worklist from a function, by walking it in reverse
5350/// post-order and adding all reachable code to the worklist.
5351///
5352/// This has a couple of tricks to make the code faster and more powerful. In
5353/// particular, we constant fold and DCE instructions as we go, to avoid adding
5354/// them to the worklist (this significantly speeds up instcombine on code where
5355/// many instructions are dead or constant). Additionally, if we find a branch
5356/// whose condition is a known constant, we only visit the reachable successors.
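// Illustrative sketch (not from the original source; hypothetical IR): for
//   br i1 true, label %then, label %else
// only %then is visited; the edge to %else is recorded as dead and any phi
// values flowing into %else along that edge are replaced with poison.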
5357 bool InstCombinerImpl::prepareWorklist(Function &F) {
5358 bool MadeIRChange = false;
5360 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5361 DenseMap<Constant *, Constant *> FoldedConstants;
5362 AliasScopeTracker SeenAliasScopes;
5363
5364 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5365 for (BasicBlock *Succ : successors(BB))
5366 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5367 for (PHINode &PN : Succ->phis())
5368 for (Use &U : PN.incoming_values())
5369 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5370 U.set(PoisonValue::get(PN.getType()));
5371 MadeIRChange = true;
5372 }
5373 };
5374
5375 for (BasicBlock *BB : RPOT) {
5376 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5377 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5378 })) {
5379 HandleOnlyLiveSuccessor(BB, nullptr);
5380 continue;
5381 }
5382 LiveBlocks.insert(BB);
5383
5384 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5385 // ConstantProp instruction if trivially constant.
5386 if (!Inst.use_empty() &&
5387 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5388 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5389 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5390 << '\n');
5391 Inst.replaceAllUsesWith(C);
5392 ++NumConstProp;
5393 if (isInstructionTriviallyDead(&Inst, &TLI))
5394 Inst.eraseFromParent();
5395 MadeIRChange = true;
5396 continue;
5397 }
5398
5399 // See if we can constant fold its operands.
5400 for (Use &U : Inst.operands()) {
5401 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5402 continue;
5403
5404 auto *C = cast<Constant>(U);
5405 Constant *&FoldRes = FoldedConstants[C];
5406 if (!FoldRes)
5407 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5408
5409 if (FoldRes != C) {
5410 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5411 << "\n Old = " << *C
5412 << "\n New = " << *FoldRes << '\n');
5413 U = FoldRes;
5414 MadeIRChange = true;
5415 }
5416 }
5417
5418 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5419 // these call instructions consumes a non-trivial amount of time and
5420 // provides no value for the optimization.
5421 if (!Inst.isDebugOrPseudoInst()) {
5422 InstrsForInstructionWorklist.push_back(&Inst);
5423 SeenAliasScopes.analyse(&Inst);
5424 }
5425 }
5426
5427 // If this is a branch or switch on a constant, mark only the single
5428 // live successor. Otherwise assume all successors are live.
5429 Instruction *TI = BB->getTerminator();
5430 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5431 if (isa<UndefValue>(BI->getCondition())) {
5432 // Branch on undef is UB.
5433 HandleOnlyLiveSuccessor(BB, nullptr);
5434 continue;
5435 }
5436 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5437 bool CondVal = Cond->getZExtValue();
5438 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5439 continue;
5440 }
5441 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5442 if (isa<UndefValue>(SI->getCondition())) {
5443 // Switch on undef is UB.
5444 HandleOnlyLiveSuccessor(BB, nullptr);
5445 continue;
5446 }
5447 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5448 HandleOnlyLiveSuccessor(BB,
5449 SI->findCaseValue(Cond)->getCaseSuccessor());
5450 continue;
5451 }
5452 }
5453 }
5454
5455 // Remove instructions inside unreachable blocks. This prevents the
5456 // instcombine code from having to deal with some bad special cases, and
5457 // reduces use counts of instructions.
5458 for (BasicBlock &BB : F) {
5459 if (LiveBlocks.count(&BB))
5460 continue;
5461
5462 unsigned NumDeadInstInBB;
5463 unsigned NumDeadDbgInstInBB;
5464 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5466
5467 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5468 NumDeadInst += NumDeadInstInBB;
5469 }
5470
5471 // Once we've found all of the instructions to add to instcombine's worklist,
5472 // add them in reverse order. This way instcombine will visit from the top
5473 // of the function down. This jibes well with the way that it adds all uses
5474 // of instructions to the worklist after doing a transformation, thus avoiding
5475 // some N^2 behavior in pathological cases.
5476 Worklist.reserve(InstrsForInstructionWorklist.size());
5477 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5478 // DCE instruction if trivially dead. As we iterate in reverse program
5479 // order here, we will clean up whole chains of dead instructions.
5480 if (isInstructionTriviallyDead(Inst, &TLI) ||
5481 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5482 ++NumDeadInst;
5483 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5484 salvageDebugInfo(*Inst);
5485 Inst->eraseFromParent();
5486 MadeIRChange = true;
5487 continue;
5488 }
5489
5490 Worklist.push(Inst);
5491 }
5492
5493 return MadeIRChange;
5494}
5495
5497 // Collect backedges.
5499 for (BasicBlock *BB : RPOT) {
5500 Visited.insert(BB);
5501 for (BasicBlock *Succ : successors(BB))
5502 if (Visited.contains(Succ))
5503 BackEdges.insert({BB, Succ});
5504 }
5505 ComputedBackEdges = true;
5506}
5507
5508 static bool combineInstructionsOverFunction(
5509 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5510 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5511 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5512 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5513 const InstCombineOptions &Opts) {
5514 auto &DL = F.getDataLayout();
5515 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5516 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5517
5518 /// Builder - This is an IRBuilder that automatically inserts new
5519 /// instructions into the worklist when they are created.
5520 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
5521 F.getContext(), TargetFolder(DL),
5522 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5523 Worklist.add(I);
5524 if (auto *Assume = dyn_cast<AssumeInst>(I))
5525 AC.registerAssumption(Assume);
5526 }));
5527
5527
5528 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5529
5530 // Lower dbg.declare intrinsics, otherwise their value may be clobbered
5531 // by the instcombiner.
5532 bool MadeIRChange = false;
5533 if (ShouldLowerDbgDeclare)
5534 MadeIRChange = LowerDbgDeclare(F);
5535
5536 // Iterate while there is work to do.
5537 unsigned Iteration = 0;
5538 while (true) {
5539 ++Iteration;
5540
5541 if (Iteration > Opts.MaxIterations && !VerifyFixpoint) {
5542 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5543 << " on " << F.getName()
5544 << " reached; stopping without verifying fixpoint\n");
5545 break;
5546 }
5547
5548 ++NumWorklistIterations;
5549 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5550 << F.getName() << "\n");
5551
5552 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5553 ORE, BFI, BPI, PSI, DL, RPOT);
5555 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5556 MadeChangeInThisIteration |= IC.run();
5557 if (!MadeChangeInThisIteration)
5558 break;
5559
5560 MadeIRChange = true;
5561 if (Iteration > Opts.MaxIterations) {
5562 report_fatal_error(
5563 "Instruction Combining on " + Twine(F.getName()) +
5564 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5565 " iterations. " +
5566 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5567 "'instcombine-no-verify-fixpoint' to suppress this error.",
5568 /*GenCrashDiag=*/false);
5569 }
5570 }
5571
5572 if (Iteration == 1)
5573 ++NumOneIteration;
5574 else if (Iteration == 2)
5575 ++NumTwoIterations;
5576 else if (Iteration == 3)
5577 ++NumThreeIterations;
5578 else
5579 ++NumFourOrMoreIterations;
5580
5581 return MadeIRChange;
5582}
5583
5585
5586 void InstCombinePass::printPipeline(
5587 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5588 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5589 OS, MapClassName2PassName);
5590 OS << '<';
5591 OS << "max-iterations=" << Options.MaxIterations << ";";
5592 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5593 OS << '>';
5594}
5595
5596char InstCombinePass::ID = 0;
5597
5598 PreservedAnalyses InstCombinePass::run(Function &F,
5599 FunctionAnalysisManager &AM) {
5600 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5601 // No changes since last InstCombine pass, exit early.
5602 if (LRT.shouldSkip(&ID))
5603 return PreservedAnalyses::all();
5604
5605 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5606 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5607 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5608 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5609 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5610
5611 auto *AA = &AM.getResult<AAManager>(F);
5612 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5613 ProfileSummaryInfo *PSI =
5614 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5615 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5616 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5617 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
5618
5619 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5620 BFI, BPI, PSI, Options)) {
5621 // No changes, all analyses are preserved.
5622 LRT.update(&ID, /*Changed=*/false);
5623 return PreservedAnalyses::all();
5624 }
5625
5626 // Mark all the analyses that instcombine updates as preserved.
5627 PreservedAnalyses PA;
5628 LRT.update(&ID, /*Changed=*/true);
5629 PA.preserveSet<CFGAnalyses>();
5630 PA.preserve<LastRunTrackingAnalysis>();
5631 return PA;
5632}
5633
5635 AU.setPreservesCFG();
5648}
5649
5650 bool InstructionCombiningPass::runOnFunction(Function &F) {
5651 if (skipFunction(F))
5652 return false;
5653
5654 // Required analyses.
5655 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5656 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5657 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
5658 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5659 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5660 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5661
5662 // Optional analyses.
5663 ProfileSummaryInfo *PSI =
5664 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5665 BlockFrequencyInfo *BFI =
5666 (PSI && PSI->hasProfileSummary()) ?
5667 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5668 nullptr;
5669 BranchProbabilityInfo *BPI = nullptr;
5670 if (auto *WrapperPass =
5671 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5672 BPI = &WrapperPass->getBPI();
5673
5674 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5675 BFI, BPI, PSI, InstCombineOptions());
5676}
5677
5679
5682}
5683
5684 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
5685 "Combine redundant instructions", false, false)
5697
5698// Initialization Routines
5699 void llvm::initializeInstCombine(PassRegistry &Registry) {
5700 initializeInstructionCombiningPassPass(Registry);
5701}
5702
5703 FunctionPass *llvm::createInstructionCombiningPass() {
5704 return new InstructionCombiningPass();
5705}
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:190
#define LLVM_DEBUG(...)
Definition: Debug.h:106
This file defines the DenseMap class.
uint64_t Size
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
Hexagon Vector Combine
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition: IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
unsigned OpIndex
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:234
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition: APInt.cpp:1732
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:423
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition: APInt.cpp:1864
APInt trunc(unsigned width) const
Truncate to new width.
Definition: APInt.cpp:910
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1468
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1902
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:827
APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1934
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition: APInt.cpp:1915
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:851
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:429
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition: Pass.cpp:256
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
Class to represent array types.
Definition: DerivedTypes.h:395
uint64_t getNumElements() const
Definition: DerivedTypes.h:407
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
Definition: DerivedTypes.h:408
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
Definition: Attributes.cpp:439
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:208
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:517
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
Definition: BasicBlock.cpp:250
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
Definition: BasicBlock.cpp:374
const Instruction & front() const
Definition: BasicBlock.h:471
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:571
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:459
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:467
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
Definition: BasicBlock.cpp:430
size_t size() const
Definition: BasicBlock.h:469
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition: InstrTypes.h:370
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition: InstrTypes.h:293
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1112
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition: InstrTypes.h:1420
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1925
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1286
AttributeList getAttributes() const
Return the attributes for this call.
Definition: InstrTypes.h:1417
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:696
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:698
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
@ ICMP_NE
not equal
Definition: InstrTypes.h:695
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:825
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:787
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Definition: CmpPredicate.h:22
ConstantArray - Constant Array Declarations.
Definition: Constants.h:427
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1312
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:770
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2644
static Constant * getNot(Constant *C)
Definition: Constants.cpp:2631
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Definition: Constants.cpp:2637
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
Definition: Constants.cpp:2691
static Constant * getNeg(Constant *C, bool HasNSW=false)
Definition: Constants.cpp:2625
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:866
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:873
static ConstantInt * getBool(LLVMContext &Context, bool V)
Definition: Constants.cpp:880
This class represents a range of values.
Definition: ConstantRange.h:47
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
Definition: Constants.h:511
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1421
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
Definition: Constants.cpp:403
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
Definition: Constants.cpp:784
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:420
const Constant * stripPointerCasts() const
Definition: Constant.h:218
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:435
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
Definition: DataLayout.cpp:971
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
Definition: DataLayout.h:219
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
Definition: DataLayout.cpp:754
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
Definition: DataLayout.cpp:878
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:457
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:369
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:617
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
Definition: DataLayout.cpp:893
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:87
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:156
bool empty() const
Definition: DenseMap.h:98
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:317
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:205
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
This class represents a freeze function that returns a random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
const BasicBlock & getEntryBlock() const
Definition: Function.h:809
Represents flags for the getelementptr instruction/expression.
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
GEPNoWrapFlags getNoWrapFlags() const
Definition: Operator.h:430
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:956
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Definition: Instructions.h:980
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:113
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1700
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition: IRBuilder.h:2555
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Definition: IRBuilder.cpp:1053
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2045
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2574
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1987
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:330
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Definition: IRBuilder.h:1882
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:1874
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
Definition: IRBuilder.h:252
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Definition: IRBuilder.cpp:889
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:900
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:505
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2404
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2435
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1757
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1387
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1798
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2533
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1518
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1370
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2019
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1671
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2225
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:199
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1499
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1562
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2380
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1694
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:535
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition: IRBuilder.h:521
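As a usage illustration for the IRBuilder entry points above, here is a small hedged sketch; the function emitSelectedSum, the operand names, and the assumption that A and B are i32 values in a block without a terminator are all made up for the example.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Illustrative only: emit `sel = (a == b) ? (a + 1) : (a - 1)` at the end of BB.
// Assumes BB has no terminator yet and A/B are i32.
static Value *emitSelectedSum(BasicBlock *BB, Value *A, Value *B) {
  IRBuilder<> Builder(BB->getContext());
  Builder.SetInsertPoint(BB);                 // append to the end of BB
  Value *One = Builder.getInt32(1);
  Value *Cmp = Builder.CreateICmp(CmpInst::ICMP_EQ, A, B, "cmp");
  Value *Add = Builder.CreateAdd(A, One, "add");
  Value *Sub = Builder.CreateSub(A, One, "sub");
  return Builder.CreateSelect(Cmp, Add, Sub, "sel");
}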
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition: IRBuilder.h:74
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
Definition: InstCombiner.h:48
SimplifyQuery SQ
Definition: InstCombiner.h:77
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:337
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:228
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Definition: InstCombiner.h:143
TargetLibraryInfo & TLI
Definition: InstCombiner.h:74
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:368
AAResults * AA
Definition: InstCombiner.h:70
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:388
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
Definition: InstCombiner.h:56
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
Definition: InstCombiner.h:187
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
Definition: InstCombiner.h:420
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
Definition: InstCombiner.h:160
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Definition: InstCombiner.h:65
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombiner.h:377
BranchProbabilityInfo * BPI
Definition: InstCombiner.h:80
ReversePostOrderTraversal< BasicBlock * > & RPOT
Definition: InstCombiner.h:84
const DataLayout & DL
Definition: InstCombiner.h:76
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:455
DomConditionCache DC
Definition: InstCombiner.h:82
const bool MinimizeSize
Definition: InstCombiner.h:68
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:332
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
Definition: InstCombiner.h:97
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:412
DominatorTree & DT
Definition: InstCombiner.h:75
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
Definition: InstCombiner.h:280
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
Definition: InstCombiner.h:89
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
Definition: InstCombiner.h:433
BuilderTy & Builder
Definition: InstCombiner.h:61
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:209
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
Definition: InstCombiner.h:358
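The replaceOperand/replaceInstUsesWith helpers above encode the combiner convention that every value touched is queued for revisiting. The following is a rough, hypothetical standalone analogue, not the class's real mechanism: replaceOperandAndRemember and the plain Revisit vector exist only for illustration.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
// Illustrative only: swap an operand and remember the old one so a caller can
// later re-simplify it or erase it if it became dead.
static void replaceOperandAndRemember(Instruction &I, unsigned OpNum,
                                      Value *NewV,
                                      SmallVectorImpl<Value *> &Revisit) {
  Value *OldV = I.getOperand(OpNum);
  I.setOperand(OpNum, NewV);   // the actual rewrite
  Revisit.push_back(OldV);     // old operand may now be dead or simplifiable
}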
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition: InstCombine.h:66
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void zap()
Check that the worklist is empty and nuke the backing store for the map.
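A minimal sketch of a worklist-driven driver built on the InstructionWorklist entries above. trySimplify is a hypothetical callback, and the removeOne()/isEmpty() accessors are assumed from the worklist header; this is not the pass's real run loop.
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/InstructionWorklist.h"
using namespace llvm;
// Sketch: seed the worklist with every instruction, then keep pulling work
// until nothing is left. trySimplify returns true if it changed I.
static bool runWorklist(Function &F,
                        function_ref<bool(Instruction &)> trySimplify) {
  InstructionWorklist Worklist;
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      Worklist.push(&I);                  // pops most recently pushed first
  bool Changed = false;
  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.removeOne();
    if (!I)
      continue;                           // entry may have been removed
    if (trySimplify(*I)) {
      Worklist.pushUsersToWorkList(*I);   // users may simplify further
      Changed = true;
    }
  }
  return Changed;
}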
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:328
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:475
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:68
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1764
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:72
bool isTerminator() const
Definition: Instruction.h:277
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
Definition: Instruction.h:333
bool isShift() const
Definition: Instruction.h:282
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:472
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isIntDivRem() const
Definition: Instruction.h:280
Class to represent integer types.
Definition: DerivedTypes.h:42
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Definition: Instructions.h:176
Metadata node.
Definition: Metadata.h:1073
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1434
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1440
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:895
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition: Metadata.h:62
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR uni...
Definition: PassManager.h:692
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition: Constants.h:1460
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1878
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all analyses.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition: SmallPtrSet.h:94
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
iterator erase(const_iterator CI)
Definition: SmallVector.h:737
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
typename SuperClass::iterator iterator
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Definition: TargetFolder.h:34
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:258
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:288
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
op_iterator op_begin()
Definition: User.h:280
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:740
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:852
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isZero() const
Definition: TypeSize.h:156
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:135
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:731
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:982
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:826
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
Definition: PatternMatch.h:186
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
Definition: PatternMatch.h:245
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:864
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
Definition: PatternMatch.h:791
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
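To illustrate the PatternMatch combinators listed above, here is a hedged sketch of the usual idiom: bind sub-values while matching, then let the caller decide whether to rewrite. The matched shape ~(~X + C), which algebraically equals X - C, is only an example pattern for the sketch, and matchNotOfNotPlusConst is a hypothetical name.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;
// Example of the matcher idiom: recognize `~(~X + C)` and hand the bound
// X and C back to the caller. (Algebraically, ~(~X + C) == X - C.)
static bool matchNotOfNotPlusConst(Value *V, Value *&X, const APInt *&C) {
  // m_Not matches `xor V, -1` with the operands in either order; m_OneUse
  // guards against duplicating work into multi-use subexpressions.
  return match(V, m_Not(m_OneUse(m_Add(m_Not(m_Value(X)), m_APInt(C)))));
}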
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ FalseVal
Definition: TGLexer.h:59
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition: DWP.cpp:480
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:854
void stable_sort(R &&Range)
Definition: STLExtras.h:2037
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Check whether an instruction is safe to speculatively execute without using information from its non-constant operands (used when those operands are about to be replaced).
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2877
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2448
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2316
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:162
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1683
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2859
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition: bit.h:215
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
Definition: bit.h:146
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:406
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition: Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:44
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into an appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1990
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm....
Definition: Local.cpp:1731
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
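The ConstantFold* entries above are the usual fallback once both operands of an expression turn out to be constants. A minimal sketch of that pattern through ConstantFoldBinaryOpOperands follows; the helper name tryFoldBinOp is hypothetical.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;
// Hypothetical helper: if both operands of a binary opcode are constants,
// return the folded constant, otherwise null.
static Constant *tryFoldBinOp(unsigned Opcode, Value *L, Value *R,
                              const DataLayout &DL) {
  auto *CL = dyn_cast<Constant>(L);
  auto *CR = dyn_cast<Constant>(R);
  if (!CL || !CR)
    return nullptr;
  // May still return null, e.g. when the folder cannot produce a result.
  return ConstantFoldBinaryOpOperands(Opcode, CL, CR, DL);
}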
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2787
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:336
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition: STLExtras.h:2067
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:315
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:243
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:43
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:240
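As a usage note for the KnownBits accessors above, a short hedged sketch of querying known bits through the free computeKnownBits from ValueTracking and reading the leading-zero count; knownLeadingZeros is an invented name for the example.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;
// Sketch: how many high bits of V are provably zero? No assumption cache or
// context instruction is supplied, so the answer may be conservative.
static unsigned knownLeadingZeros(const Value *V, const DataLayout &DL) {
  KnownBits Known = computeKnownBits(V, DL);
  return Known.countMinLeadingZeros();
}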
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const