InstructionCombining.cpp
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APInt.h"
37#include "llvm/ADT/ArrayRef.h"
38#include "llvm/ADT/DenseMap.h"
41#include "llvm/ADT/Statistic.h"
46#include "llvm/Analysis/CFG.h"
61#include "llvm/IR/BasicBlock.h"
62#include "llvm/IR/CFG.h"
63#include "llvm/IR/Constant.h"
64#include "llvm/IR/Constants.h"
65#include "llvm/IR/DIBuilder.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
69#include "llvm/IR/Dominators.h"
71#include "llvm/IR/Function.h"
73#include "llvm/IR/IRBuilder.h"
74#include "llvm/IR/InstrTypes.h"
75#include "llvm/IR/Instruction.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/Metadata.h"
80#include "llvm/IR/Operator.h"
81#include "llvm/IR/PassManager.h"
83#include "llvm/IR/Type.h"
84#include "llvm/IR/Use.h"
85#include "llvm/IR/User.h"
86#include "llvm/IR/Value.h"
87#include "llvm/IR/ValueHandle.h"
92#include "llvm/Support/Debug.h"
100#include <algorithm>
101#include <cassert>
102#include <cstdint>
103#include <memory>
104#include <optional>
105#include <string>
106#include <utility>
107
108#define DEBUG_TYPE "instcombine"
110#include <optional>
111
112using namespace llvm;
113using namespace llvm::PatternMatch;
114
115STATISTIC(NumWorklistIterations,
116 "Number of instruction combining iterations performed");
117STATISTIC(NumOneIteration, "Number of functions with one iteration");
118STATISTIC(NumTwoIterations, "Number of functions with two iterations");
119STATISTIC(NumThreeIterations, "Number of functions with three iterations");
120STATISTIC(NumFourOrMoreIterations,
121 "Number of functions with four or more iterations");
122
123STATISTIC(NumCombined , "Number of insts combined");
124STATISTIC(NumConstProp, "Number of constant folds");
125STATISTIC(NumDeadInst , "Number of dead inst eliminated");
126STATISTIC(NumSunkInst , "Number of instructions sunk");
127STATISTIC(NumExpand, "Number of expansions");
128STATISTIC(NumFactor , "Number of factorizations");
129STATISTIC(NumReassoc , "Number of reassociations");
130DEBUG_COUNTER(VisitCounter, "instcombine-visit",
131 "Controls which instructions are visited");
132
133static cl::opt<bool>
134EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
135 cl::init(true));
136
138 "instcombine-max-sink-users", cl::init(32),
139 cl::desc("Maximum number of undroppable users for instruction sinking"));
140
142MaxArraySize("instcombine-maxarray-size", cl::init(1024),
143 cl::desc("Maximum array size considered when doing a combine"));
144
145// FIXME: Remove this flag when it is no longer necessary to convert
146// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
147// increases variable availability at the cost of accuracy. Variables that
148// cannot be promoted by mem2reg or SROA will be described as living in memory
149// for their entire lifetime. However, passes like DSE and instcombine can
150// delete stores to the alloca, leading to misleading and inaccurate debug
151// information. This flag can be removed when those passes are fixed.
152static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
153 cl::Hidden, cl::init(true));
154
155std::optional<Instruction *>
157 // Handle target specific intrinsics
158 if (II.getCalledFunction()->isTargetIntrinsic()) {
159 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
160 }
161 return std::nullopt;
162}
163
165 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
166 bool &KnownBitsComputed) {
167 // Handle target specific intrinsics
168 if (II.getCalledFunction()->isTargetIntrinsic()) {
169 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
170 *this, II, DemandedMask, Known, KnownBitsComputed);
171 }
172 return std::nullopt;
173}
174
176 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
177 APInt &PoisonElts2, APInt &PoisonElts3,
178 std::function<void(Instruction *, unsigned, APInt, APInt &)>
179 SimplifyAndSetOp) {
180 // Handle target specific intrinsics
181 if (II.getCalledFunction()->isTargetIntrinsic()) {
182 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
183 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
184 SimplifyAndSetOp);
185 }
186 return std::nullopt;
187}
188
189bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
190 // Approved exception for TTI use: This queries a legality property of the
191 // target, not a profitability heuristic. Ideally this should be part of
192 // DataLayout instead.
193 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
194}
195
196Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
197 if (!RewriteGEP)
199
201 auto *Inst = dyn_cast<Instruction>(GEP);
202 if (Inst)
204
205 Value *Offset = EmitGEPOffset(GEP);
206 // If a non-trivial GEP has other uses, rewrite it to avoid duplicating
207 // the offset arithmetic.
208 if (Inst && !GEP->hasOneUse() && !GEP->hasAllConstantIndices() &&
209 !GEP->getSourceElementType()->isIntegerTy(8)) {
211 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
212 Offset, "", GEP->getNoWrapFlags()));
214 }
215 return Offset;
216}
217
218/// Legal integers and common types are considered desirable. This is used to
219/// avoid creating instructions with types that may not be supported well by
220/// the backend.
221/// NOTE: This treats i8, i16 and i32 specially because they are common
222/// types in frontend languages.
223bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
224 switch (BitWidth) {
225 case 8:
226 case 16:
227 case 32:
228 return true;
229 default:
230 return DL.isLegalInteger(BitWidth);
231 }
232}
233
234/// Return true if it is desirable to convert an integer computation from a
235/// given bit width to a new bit width.
236/// We don't want to convert from a legal or desirable type (like i8) to an
237/// illegal type or from a smaller to a larger illegal type. A width of '1'
238/// is always treated as a desirable type because i1 is a fundamental type in
239/// IR, and there are many specialized optimizations for i1 types.
240/// Common/desirable widths are equally treated as legal to convert to, in
241/// order to open up more combining opportunities.
242bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
243 unsigned ToWidth) const {
244 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
245 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
246
247 // Convert to desirable widths even if they are not legal types.
248 // Only shrink types, to prevent infinite loops.
249 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
250 return true;
251
252 // If this is a legal or desirable integer 'from' type, and the result would be
253 // an illegal type, don't do the transformation.
254 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
255 return false;
256
257 // Otherwise, if both are illegal, do not increase the size of the result. We
258 // do allow things like i160 -> i64, but not i64 -> i160.
259 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
260 return false;
261
262 return true;
263}
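// For illustration only: hypothetical results of the policy above, assuming a
// typical 64-bit DataLayout where i8/i16/i32/i64 are the legal integer widths.
//   shouldChangeType(160, 64) -> true   (shrink an illegal type to a legal one)
//   shouldChangeType(64, 160) -> false  (legal source, illegal wider result)
//   shouldChangeType(33, 32)  -> true   (shrink to a desirable common width)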
264
265/// Return true if it is desirable to convert a computation from 'From' to 'To'.
266/// We don't want to convert from a legal to an illegal type or from a smaller
267/// to a larger illegal type. i1 is always treated as a legal type because it is
268/// a fundamental type in IR, and there are many specialized optimizations for
269/// i1 types.
270bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
271 // TODO: This could be extended to allow vectors. Datalayout changes might be
272 // needed to properly support that.
273 if (!From->isIntegerTy() || !To->isIntegerTy())
274 return false;
275
276 unsigned FromWidth = From->getPrimitiveSizeInBits();
277 unsigned ToWidth = To->getPrimitiveSizeInBits();
278 return shouldChangeType(FromWidth, ToWidth);
279}
280
281// Return true if No Signed Wrap should be maintained for I.
282// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
283// where both B and C should be ConstantInts, results in a constant that does
284// not overflow. This function only handles the Add and Sub opcodes. For
285// all other opcodes, the function conservatively returns false.
287 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
288 if (!OBO || !OBO->hasNoSignedWrap())
289 return false;
290
291 // We reason about Add and Sub Only.
292 Instruction::BinaryOps Opcode = I.getOpcode();
293 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
294 return false;
295
296 const APInt *BVal, *CVal;
297 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
298 return false;
299
300 bool Overflow = false;
301 if (Opcode == Instruction::Add)
302 (void)BVal->sadd_ov(*CVal, Overflow);
303 else
304 (void)BVal->ssub_ov(*CVal, Overflow);
305
306 return !Overflow;
307}
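// For illustration (hypothetical i8 IR): when reassociating
//   %t = add nsw i8 %x, 100
//   %r = add nsw i8 %t, 100
// the folded constant 100 + 100 overflows the signed i8 range, so nsw cannot
// be kept on the combined add; with constants such as 10 and 20 it could be.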
308
310 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
311 return OBO && OBO->hasNoUnsignedWrap();
312}
313
315 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
316 return OBO && OBO->hasNoSignedWrap();
317}
318
319/// Conservatively clears subclassOptionalData after a reassociation or
320/// commutation. Fast-math flags are the exception: they are preserved when
321/// applicable.
323 FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
324 if (!FPMO) {
325 I.clearSubclassOptionalData();
326 return;
327 }
328
329 FastMathFlags FMF = I.getFastMathFlags();
330 I.clearSubclassOptionalData();
331 I.setFastMathFlags(FMF);
332}
333
334/// Combine constant operands of associative operations either before or after a
335/// cast to eliminate one of the associative operations:
336/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
337/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
339 InstCombinerImpl &IC) {
340 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
341 if (!Cast || !Cast->hasOneUse())
342 return false;
343
344 // TODO: Enhance logic for other casts and remove this check.
345 auto CastOpcode = Cast->getOpcode();
346 if (CastOpcode != Instruction::ZExt)
347 return false;
348
349 // TODO: Enhance logic for other BinOps and remove this check.
350 if (!BinOp1->isBitwiseLogicOp())
351 return false;
352
353 auto AssocOpcode = BinOp1->getOpcode();
354 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
355 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
356 return false;
357
358 Constant *C1, *C2;
359 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
360 !match(BinOp2->getOperand(1), m_Constant(C2)))
361 return false;
362
363 // TODO: This assumes a zext cast.
364 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
365 // to the destination type might lose bits.
366
367 // Fold the constants together in the destination type:
368 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
369 const DataLayout &DL = IC.getDataLayout();
370 Type *DestTy = C1->getType();
371 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
372 if (!CastC2)
373 return false;
374 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
375 if (!FoldedC)
376 return false;
377
378 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
379 IC.replaceOperand(*BinOp1, 1, FoldedC);
381 Cast->dropPoisonGeneratingFlags();
382 return true;
383}
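// For illustration, a hypothetical zext/and instance of the fold above:
//   %a = and i8 %x, 15
//   %z = zext i8 %a to i32
//   %r = and i32 %z, 255
// becomes
//   %z = zext i8 %x to i32
//   %r = and i32 %z, 15     ; FoldedC = 255 & zext(15) = 15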
384
385// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
386// inttoptr ( ptrtoint (x) ) --> x
387Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
388 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
389 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
390 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
391 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
392 Type *CastTy = IntToPtr->getDestTy();
393 if (PtrToInt &&
394 CastTy->getPointerAddressSpace() ==
395 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
396 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
397 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
398 return PtrToInt->getOperand(0);
399 }
400 return nullptr;
401}
402
403/// This performs a few simplifications for operators that are associative or
404/// commutative:
405///
406/// Commutative operators:
407///
408/// 1. Order operands such that they are listed from right (least complex) to
409/// left (most complex). This puts constants before unary operators before
410/// binary operators.
411///
412/// Associative operators:
413///
414/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
415/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
416///
417/// Associative and commutative operators:
418///
419/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
420/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
421/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
422/// if C1 and C2 are constants.
424 Instruction::BinaryOps Opcode = I.getOpcode();
425 bool Changed = false;
426
427 do {
428 // Order operands such that they are listed from right (least complex) to
429 // left (most complex). This puts constants before unary operators before
430 // binary operators.
431 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
432 getComplexity(I.getOperand(1)))
433 Changed = !I.swapOperands();
434
435 if (I.isCommutative()) {
436 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
437 replaceOperand(I, 0, Pair->first);
438 replaceOperand(I, 1, Pair->second);
439 Changed = true;
440 }
441 }
442
443 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
444 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
445
446 if (I.isAssociative()) {
447 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
448 if (Op0 && Op0->getOpcode() == Opcode) {
449 Value *A = Op0->getOperand(0);
450 Value *B = Op0->getOperand(1);
451 Value *C = I.getOperand(1);
452
453 // Does "B op C" simplify?
454 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
455 // It simplifies to V. Form "A op V".
456 replaceOperand(I, 0, A);
457 replaceOperand(I, 1, V);
458 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
459 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
460
461 // Conservatively clear all optional flags since they may not be
462 // preserved by the reassociation. Reset nsw/nuw based on the above
463 // analysis.
465
466 // Note: this is only valid because SimplifyBinOp doesn't look at
467 // the operands to Op0.
468 if (IsNUW)
469 I.setHasNoUnsignedWrap(true);
470
471 if (IsNSW)
472 I.setHasNoSignedWrap(true);
473
474 Changed = true;
475 ++NumReassoc;
476 continue;
477 }
478 }
479
480 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
481 if (Op1 && Op1->getOpcode() == Opcode) {
482 Value *A = I.getOperand(0);
483 Value *B = Op1->getOperand(0);
484 Value *C = Op1->getOperand(1);
485
486 // Does "A op B" simplify?
487 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
488 // It simplifies to V. Form "V op C".
489 replaceOperand(I, 0, V);
490 replaceOperand(I, 1, C);
491 // Conservatively clear the optional flags, since they may not be
492 // preserved by the reassociation.
494 Changed = true;
495 ++NumReassoc;
496 continue;
497 }
498 }
499 }
500
501 if (I.isAssociative() && I.isCommutative()) {
502 if (simplifyAssocCastAssoc(&I, *this)) {
503 Changed = true;
504 ++NumReassoc;
505 continue;
506 }
507
508 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
509 if (Op0 && Op0->getOpcode() == Opcode) {
510 Value *A = Op0->getOperand(0);
511 Value *B = Op0->getOperand(1);
512 Value *C = I.getOperand(1);
513
514 // Does "C op A" simplify?
515 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
516 // It simplifies to V. Form "V op B".
517 replaceOperand(I, 0, V);
518 replaceOperand(I, 1, B);
519 // Conservatively clear the optional flags, since they may not be
520 // preserved by the reassociation.
522 Changed = true;
523 ++NumReassoc;
524 continue;
525 }
526 }
527
528 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
529 if (Op1 && Op1->getOpcode() == Opcode) {
530 Value *A = I.getOperand(0);
531 Value *B = Op1->getOperand(0);
532 Value *C = Op1->getOperand(1);
533
534 // Does "C op A" simplify?
535 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
536 // It simplifies to V. Form "B op V".
537 replaceOperand(I, 0, B);
538 replaceOperand(I, 1, V);
539 // Conservatively clear the optional flags, since they may not be
540 // preserved by the reassociation.
542 Changed = true;
543 ++NumReassoc;
544 continue;
545 }
546 }
547
548 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
549 // if C1 and C2 are constants.
550 Value *A, *B;
551 Constant *C1, *C2, *CRes;
552 if (Op0 && Op1 &&
553 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
554 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
555 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
556 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
557 bool IsNUW = hasNoUnsignedWrap(I) &&
558 hasNoUnsignedWrap(*Op0) &&
559 hasNoUnsignedWrap(*Op1);
560 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
561 BinaryOperator::CreateNUW(Opcode, A, B) :
562 BinaryOperator::Create(Opcode, A, B);
563
564 if (isa<FPMathOperator>(NewBO)) {
565 FastMathFlags Flags = I.getFastMathFlags() &
566 Op0->getFastMathFlags() &
567 Op1->getFastMathFlags();
568 NewBO->setFastMathFlags(Flags);
569 }
570 InsertNewInstWith(NewBO, I.getIterator());
571 NewBO->takeName(Op1);
572 replaceOperand(I, 0, NewBO);
573 replaceOperand(I, 1, CRes);
574 // Conservatively clear the optional flags, since they may not be
575 // preserved by the reassociation.
577 if (IsNUW)
578 I.setHasNoUnsignedWrap(true);
579
580 Changed = true;
581 continue;
582 }
583 }
584
585 // No further simplifications.
586 return Changed;
587 } while (true);
588}
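// For illustration, a hypothetical instance of transform (6) above, assuming
// %s1 and %s2 have no other uses:
//   %s1 = add i32 %A, 10
//   %s2 = add i32 %B, 20
//   %r  = add i32 %s1, %s2
// becomes
//   %t = add i32 %A, %B
//   %r = add i32 %t, 30     ; C1 op C2 constant-folds to 30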
589
590/// Return whether "X LOp (Y ROp Z)" is always equal to
591/// "(X LOp Y) ROp (X LOp Z)".
594 // X & (Y | Z) <--> (X & Y) | (X & Z)
595 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
596 if (LOp == Instruction::And)
597 return ROp == Instruction::Or || ROp == Instruction::Xor;
598
599 // X | (Y & Z) <--> (X | Y) & (X | Z)
600 if (LOp == Instruction::Or)
601 return ROp == Instruction::And;
602
603 // X * (Y + Z) <--> (X * Y) + (X * Z)
604 // X * (Y - Z) <--> (X * Y) - (X * Z)
605 if (LOp == Instruction::Mul)
606 return ROp == Instruction::Add || ROp == Instruction::Sub;
607
608 return false;
609}
610
611/// Return whether "(X LOp Y) ROp Z" is always equal to
612/// "(X ROp Z) LOp (Y ROp Z)".
616 return leftDistributesOverRight(ROp, LOp);
617
618 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
620
621 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
622 // but this requires knowing that the addition does not overflow and other
623 // such subtleties.
624}
625
626/// This function returns the identity value for the given opcode, which can be used to
627/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
629 if (isa<Constant>(V))
630 return nullptr;
631
632 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
633}
634
635/// This function predicates factorization using distributive laws. By default,
636/// it just returns the 'Op' inputs. But for special-cases like
637/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
638/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
639/// allow more factorization opportunities.
642 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
643 assert(Op && "Expected a binary operator");
644 LHS = Op->getOperand(0);
645 RHS = Op->getOperand(1);
646 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
647 Constant *C;
648 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
649 // X << C --> X * (1 << C)
651 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
652 assert(RHS && "Constant folding of immediate constants failed");
653 return Instruction::Mul;
654 }
655 // TODO: We can add other conversions e.g. shr => div etc.
656 }
657 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
658 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
660 // lshr nneg C, X --> ashr nneg C, X
661 return Instruction::AShr;
662 }
663 }
664 return Op->getOpcode();
665}
666
667/// This tries to simplify binary operations by factorizing out common terms
668/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
671 Instruction::BinaryOps InnerOpcode, Value *A,
672 Value *B, Value *C, Value *D) {
673 assert(A && B && C && D && "All values must be provided");
674
675 Value *V = nullptr;
676 Value *RetVal = nullptr;
677 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
678 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
679
680 // Does "X op' Y" always equal "Y op' X"?
681 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
682
683 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
684 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
685 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
686 // commutative case, "(A op' B) op (C op' A)"?
687 if (A == C || (InnerCommutative && A == D)) {
688 if (A != C)
689 std::swap(C, D);
690 // Consider forming "A op' (B op D)".
691 // If "B op D" simplifies then it can be formed with no cost.
692 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
693
694 // If "B op D" doesn't simplify then only go on if one of the existing
695 // operations "A op' B" and "C op' D" will be zapped as no longer used.
696 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
697 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
698 if (V)
699 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
700 }
701 }
702
703 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
704 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
705 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
706 // commutative case, "(A op' B) op (B op' D)"?
707 if (B == D || (InnerCommutative && B == C)) {
708 if (B != D)
709 std::swap(C, D);
710 // Consider forming "(A op C) op' B".
711 // If "A op C" simplifies then it can be formed with no cost.
712 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
713
714 // If "A op C" doesn't simplify then only go on if one of the existing
715 // operations "A op' B" and "C op' D" will be zapped as no longer used.
716 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
717 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
718 if (V)
719 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
720 }
721 }
722
723 if (!RetVal)
724 return nullptr;
725
726 ++NumFactor;
727 RetVal->takeName(&I);
728
729 // Try to add no-overflow flags to the final value.
730 if (isa<OverflowingBinaryOperator>(RetVal)) {
731 bool HasNSW = false;
732 bool HasNUW = false;
733 if (isa<OverflowingBinaryOperator>(&I)) {
734 HasNSW = I.hasNoSignedWrap();
735 HasNUW = I.hasNoUnsignedWrap();
736 }
737 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
738 HasNSW &= LOBO->hasNoSignedWrap();
739 HasNUW &= LOBO->hasNoUnsignedWrap();
740 }
741
742 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
743 HasNSW &= ROBO->hasNoSignedWrap();
744 HasNUW &= ROBO->hasNoUnsignedWrap();
745 }
746
747 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
748 // We can propagate 'nsw' if we know that
749 // %Y = mul nsw i16 %X, C
750 // %Z = add nsw i16 %Y, %X
751 // =>
752 // %Z = mul nsw i16 %X, C+1
753 //
754 // iff C+1 isn't INT_MIN
755 const APInt *CInt;
756 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
757 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
758
759 // nuw can be propagated with any constant or nuw value.
760 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
761 }
762 }
763 return RetVal;
764}
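// For illustration, a hypothetical instance of the nsw case described above:
//   %y = mul nsw i16 %x, 5
//   %z = add nsw i16 %y, %x
// factorizes (treating %x as %x * 1) to
//   %z = mul nsw i16 %x, 6  ; nsw is kept because 6 is not INT_MIN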
765
766// If `I` has one Const operand and the other matches `(ctpop (not x))`,
767// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
768// This is only useful if the new subtract can fold, so we only handle the
769// following cases:
770// 1) (add/sub/disjoint_or C, (ctpop (not x))
771// -> (add/sub/disjoint_or C', (ctpop x))
772// 2) (cmp pred C, (ctpop (not x))
773// -> (cmp pred C', (ctpop x))
775 unsigned Opc = I->getOpcode();
776 unsigned ConstIdx = 1;
777 switch (Opc) {
778 default:
779 return nullptr;
780 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
781 // We can fold the BitWidth(x) with add/sub/icmp as long as the other operand
782 // is constant.
783 case Instruction::Sub:
784 ConstIdx = 0;
785 break;
786 case Instruction::ICmp:
787 // Signed predicates aren't correct in some edge cases, like for i2 types. As
788 // well, since (ctpop x) is known [0, log2(BitWidth(x))], almost all signed
789 // comparisons against it are simplified to unsigned.
790 if (cast<ICmpInst>(I)->isSigned())
791 return nullptr;
792 break;
793 case Instruction::Or:
794 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
795 return nullptr;
796 [[fallthrough]];
797 case Instruction::Add:
798 break;
799 }
800
801 Value *Op;
802 // Find ctpop.
803 if (!match(I->getOperand(1 - ConstIdx),
804 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
805 return nullptr;
806
807 Constant *C;
808 // Check other operand is ImmConstant.
809 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
810 return nullptr;
811
812 Type *Ty = Op->getType();
813 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
814 // Need extra check for icmp. Note if this check is true, it generally means
815 // the icmp will simplify to true/false.
816 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
817 Constant *Cmp =
819 if (!Cmp || !Cmp->isZeroValue())
820 return nullptr;
821 }
822
823 // Check we can invert `(not x)` for free.
824 bool Consumes = false;
825 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
826 return nullptr;
827 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
828 assert(NotOp != nullptr &&
829 "Desync between isFreeToInvert and getFreelyInverted");
830
831 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
832
833 Value *R = nullptr;
834
835 // Do the transformation here to avoid potentially introducing an infinite
836 // loop.
837 switch (Opc) {
838 case Instruction::Sub:
839 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
840 break;
841 case Instruction::Or:
842 case Instruction::Add:
843 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
844 break;
845 case Instruction::ICmp:
846 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
847 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
848 break;
849 default:
850 llvm_unreachable("Unhandled Opcode");
851 }
852 assert(R != nullptr);
853 return replaceInstUsesWith(*I, R);
854}
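// For illustration, a hypothetical i8 instance of case 1) above, using
// ctpop(~x) == 8 - ctpop(x):
//   %n = xor i8 %x, -1
//   %p = call i8 @llvm.ctpop.i8(i8 %n)
//   %r = add i8 %p, 10
// becomes
//   %p2 = call i8 @llvm.ctpop.i8(i8 %x)
//   %r  = sub i8 18, %p2    ; 18 == C + BitWidth == 10 + 8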
855
856// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
857// IFF
858// 1) the logic_shifts match
859// 2) either both binops are binops and one is `and` or
860// BinOp1 is `and`
861// (logic_shift (inv_logic_shift C1, C), C) == C1 or
862//
863// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
864//
865// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
866// IFF
867// 1) the logic_shifts match
868// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
869//
870// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
871//
872// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
873// IFF
874// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
875// 2) Binop2 is `not`
876//
877// -> (arithmetic_shift Binop1((not X), Y), Amt)
878
880 const DataLayout &DL = I.getDataLayout();
881 auto IsValidBinOpc = [](unsigned Opc) {
882 switch (Opc) {
883 default:
884 return false;
885 case Instruction::And:
886 case Instruction::Or:
887 case Instruction::Xor:
888 case Instruction::Add:
889 // Skip Sub as we only match constant masks which will canonicalize to use
890 // add.
891 return true;
892 }
893 };
894
895 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
896 // constraints.
897 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
898 unsigned ShOpc) {
899 assert(ShOpc != Instruction::AShr);
900 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
901 ShOpc == Instruction::Shl;
902 };
903
904 auto GetInvShift = [](unsigned ShOpc) {
905 assert(ShOpc != Instruction::AShr);
906 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
907 };
908
909 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
910 unsigned ShOpc, Constant *CMask,
911 Constant *CShift) {
912 // If the BinOp1 is `and` we don't need to check the mask.
913 if (BinOpc1 == Instruction::And)
914 return true;
915
916 // For all other possible transforms we need a completely distributable
917 // binop/shift (anything but `add` + `lshr`).
918 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
919 return false;
920
921 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
922 // vecs, otherwise the mask will be simplified and the following check will
923 // handle it).
924 if (BinOpc2 == Instruction::And)
925 return true;
926
927 // Otherwise, need mask that meets the below requirement.
928 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
929 Constant *MaskInvShift =
930 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
931 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
932 CMask;
933 };
934
935 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
936 Constant *CMask, *CShift;
937 Value *X, *Y, *ShiftedX, *Mask, *Shift;
938 if (!match(I.getOperand(ShOpnum),
939 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
940 return nullptr;
941 if (!match(I.getOperand(1 - ShOpnum),
942 m_BinOp(m_Value(ShiftedX), m_Value(Mask))))
943 return nullptr;
944
945 if (!match(ShiftedX, m_OneUse(m_Shift(m_Value(X), m_Specific(Shift)))))
946 return nullptr;
947
948 // Make sure we are matching instruction shifts and not ConstantExpr
949 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
950 auto *IX = dyn_cast<Instruction>(ShiftedX);
951 if (!IY || !IX)
952 return nullptr;
953
954 // LHS and RHS need same shift opcode
955 unsigned ShOpc = IY->getOpcode();
956 if (ShOpc != IX->getOpcode())
957 return nullptr;
958
959 // Make sure binop is real instruction and not ConstantExpr
960 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
961 if (!BO2)
962 return nullptr;
963
964 unsigned BinOpc = BO2->getOpcode();
965 // Make sure we have valid binops.
966 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
967 return nullptr;
968
969 if (ShOpc == Instruction::AShr) {
970 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
971 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
972 Value *NotX = Builder.CreateNot(X);
973 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
975 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
976 }
977
978 return nullptr;
979 }
980
981 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
982 // distribute to drop the shift, irrespective of the constants.
983 if (BinOpc == I.getOpcode() &&
984 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
985 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
986 Value *NewBinOp1 = Builder.CreateBinOp(
987 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
988 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
989 }
990
991 // Otherwise we can only distribute by constant shifting the mask, so
992 // ensure we have constants.
993 if (!match(Shift, m_ImmConstant(CShift)))
994 return nullptr;
995 if (!match(Mask, m_ImmConstant(CMask)))
996 return nullptr;
997
998 // Check if we can distribute the binops.
999 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1000 return nullptr;
1001
1002 Constant *NewCMask =
1003 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1004 Value *NewBinOp2 = Builder.CreateBinOp(
1005 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1006 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1007 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1008 NewBinOp1, CShift);
1009 };
1010
1011 if (Instruction *R = MatchBinOp(0))
1012 return R;
1013 return MatchBinOp(1);
1014}
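// For illustration, a hypothetical instance of the "BinOp1 == BinOp2 with shl"
// case above, assuming both shifts have a single use:
//   %sx = shl i32 %x, %amt
//   %a  = add i32 %sx, %mask
//   %sy = shl i32 %y, %amt
//   %r  = add i32 %a, %sy
// becomes
//   %t  = add i32 %x, %y
//   %ts = shl i32 %t, %amt
//   %r  = add i32 %ts, %mask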
1015
1016// (Binop (zext C), (select C, T, F))
1017// -> (select C, (binop 1, T), (binop 0, F))
1018//
1019// (Binop (sext C), (select C, T, F))
1020// -> (select C, (binop -1, T), (binop 0, F))
1021//
1022// Attempt to simplify binary operations into a select with folded args, when
1023// one operand of the binop is a select instruction and the other operand is a
1024// zext/sext extension, whose value is the select condition.
1027 // TODO: this simplification may be extended to any speculatable instruction,
1028 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1029 Instruction::BinaryOps Opc = I.getOpcode();
1030 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1031 Value *A, *CondVal, *TrueVal, *FalseVal;
1032 Value *CastOp;
1033
1034 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1035 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1036 A->getType()->getScalarSizeInBits() == 1 &&
1037 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1038 m_Value(FalseVal)));
1039 };
1040
1041 // Make sure one side of the binop is a select instruction, and the other is a
1042 // zero/sign extension operating on an i1.
1043 if (MatchSelectAndCast(LHS, RHS))
1044 CastOp = LHS;
1045 else if (MatchSelectAndCast(RHS, LHS))
1046 CastOp = RHS;
1047 else
1048 return nullptr;
1049
1050 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1051 bool IsCastOpRHS = (CastOp == RHS);
1052 bool IsZExt = isa<ZExtInst>(CastOp);
1053 Constant *C;
1054
1055 if (IsTrueArm) {
1056 C = Constant::getNullValue(V->getType());
1057 } else if (IsZExt) {
1058 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1059 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1060 } else {
1061 C = Constant::getAllOnesValue(V->getType());
1062 }
1063
1064 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1065 : Builder.CreateBinOp(Opc, C, V);
1066 };
1067
1068 // If the value used in the zext/sext is the select condition, or the negation
1069 // of the select condition, the binop can be simplified.
1070 if (CondVal == A) {
1071 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1072 return SelectInst::Create(CondVal, NewTrueVal,
1073 NewFoldedConst(true, FalseVal));
1074 }
1075
1076 if (match(A, m_Not(m_Specific(CondVal)))) {
1077 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1078 return SelectInst::Create(CondVal, NewTrueVal,
1079 NewFoldedConst(false, FalseVal));
1080 }
1081
1082 return nullptr;
1083}
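// For illustration, a hypothetical zext instance of the fold above:
//   %z = zext i1 %c to i8
//   %s = select i1 %c, i8 %t, i8 %f
//   %r = add i8 %z, %s
// becomes
//   %t1 = add i8 %t, 1
//   %r  = select i1 %c, i8 %t1, i8 %f   ; the false arm folds as %f + 0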
1084
1086 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1087 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1088 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1089 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1090 Value *A, *B, *C, *D;
1091 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1092
1093 if (Op0)
1094 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1095 if (Op1)
1096 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1097
1098 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1099 // a common term.
1100 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1101 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1102 return V;
1103
1104 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1105 // term.
1106 if (Op0)
1107 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1108 if (Value *V =
1109 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1110 return V;
1111
1112 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1113 // term.
1114 if (Op1)
1115 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1116 if (Value *V =
1117 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1118 return V;
1119
1120 return nullptr;
1121}
1122
1123/// This tries to simplify binary operations which some other binary operation
1124/// distributes over, either by factorizing out common terms
1125/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1126/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1127/// Returns the simplified value, or null if it didn't simplify.
1129 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1130 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1131 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1132 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1133
1134 // Factorization.
1135 if (Value *R = tryFactorizationFolds(I))
1136 return R;
1137
1138 // Expansion.
1139 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1140 // The instruction has the form "(A op' B) op C". See if expanding it out
1141 // to "(A op C) op' (B op C)" results in simplifications.
1142 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1143 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1144
1145 // Disable the use of undef because it's not safe to distribute undef.
1146 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1147 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1148 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1149
1150 // Do "A op C" and "B op C" both simplify?
1151 if (L && R) {
1152 // They do! Return "L op' R".
1153 ++NumExpand;
1154 C = Builder.CreateBinOp(InnerOpcode, L, R);
1155 C->takeName(&I);
1156 return C;
1157 }
1158
1159 // Does "A op C" simplify to the identity value for the inner opcode?
1160 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1161 // They do! Return "B op C".
1162 ++NumExpand;
1163 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1164 C->takeName(&I);
1165 return C;
1166 }
1167
1168 // Does "B op C" simplify to the identity value for the inner opcode?
1169 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1170 // They do! Return "A op C".
1171 ++NumExpand;
1172 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1173 C->takeName(&I);
1174 return C;
1175 }
1176 }
1177
1178 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1179 // The instruction has the form "A op (B op' C)". See if expanding it out
1180 // to "(A op B) op' (A op C)" results in simplifications.
1181 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1182 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1183
1184 // Disable the use of undef because it's not safe to distribute undef.
1185 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1186 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1187 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1188
1189 // Do "A op B" and "A op C" both simplify?
1190 if (L && R) {
1191 // They do! Return "L op' R".
1192 ++NumExpand;
1193 A = Builder.CreateBinOp(InnerOpcode, L, R);
1194 A->takeName(&I);
1195 return A;
1196 }
1197
1198 // Does "A op B" simplify to the identity value for the inner opcode?
1199 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1200 // They do! Return "A op C".
1201 ++NumExpand;
1202 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1203 A->takeName(&I);
1204 return A;
1205 }
1206
1207 // Does "A op C" simplify to the identity value for the inner opcode?
1208 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1209 // They do! Return "A op B".
1210 ++NumExpand;
1211 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1212 A->takeName(&I);
1213 return A;
1214 }
1215 }
1216
1218}
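// For illustration, a hypothetical expansion where one half simplifies to the
// identity value of the inner opcode:
//   %n = xor i32 %X, -1
//   %o = or i32 %n, %C
//   %r = and i32 %X, %o
// Expanding "X & (~X | C)" to "(X & ~X) | (X & C)" simplifies "X & ~X" to 0,
// the identity for 'or', so the whole expression becomes:
//   %r = and i32 %X, %C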
1219
1220static std::optional<std::pair<Value *, Value *>>
1222 if (LHS->getParent() != RHS->getParent())
1223 return std::nullopt;
1224
1225 if (LHS->getNumIncomingValues() < 2)
1226 return std::nullopt;
1227
1228 if (!equal(LHS->blocks(), RHS->blocks()))
1229 return std::nullopt;
1230
1231 Value *L0 = LHS->getIncomingValue(0);
1232 Value *R0 = RHS->getIncomingValue(0);
1233
1234 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1235 Value *L1 = LHS->getIncomingValue(I);
1236 Value *R1 = RHS->getIncomingValue(I);
1237
1238 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1239 continue;
1240
1241 return std::nullopt;
1242 }
1243
1244 return std::optional(std::pair(L0, R0));
1245}
1246
1247std::optional<std::pair<Value *, Value *>>
1248InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1249 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1250 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1251 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1252 return std::nullopt;
1253 switch (LHSInst->getOpcode()) {
1254 case Instruction::PHI:
1255 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1256 case Instruction::Select: {
1257 Value *Cond = LHSInst->getOperand(0);
1258 Value *TrueVal = LHSInst->getOperand(1);
1259 Value *FalseVal = LHSInst->getOperand(2);
1260 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1261 FalseVal == RHSInst->getOperand(1))
1262 return std::pair(TrueVal, FalseVal);
1263 return std::nullopt;
1264 }
1265 case Instruction::Call: {
1266 // Match min(a, b) and max(a, b)
1267 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1268 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1269 if (LHSMinMax && RHSMinMax &&
1270 LHSMinMax->getPredicate() ==
1272 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1273 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1274 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1275 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1276 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1277 return std::nullopt;
1278 }
1279 default:
1280 return std::nullopt;
1281 }
1282}
1283
1285 Value *LHS,
1286 Value *RHS) {
1287 Value *A, *B, *C, *D, *E, *F;
1288 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1289 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1290 if (!LHSIsSelect && !RHSIsSelect)
1291 return nullptr;
1292
1293 FastMathFlags FMF;
1295 if (isa<FPMathOperator>(&I)) {
1296 FMF = I.getFastMathFlags();
1298 }
1299
1300 Instruction::BinaryOps Opcode = I.getOpcode();
1302
1303 Value *Cond, *True = nullptr, *False = nullptr;
1304
1305 // Special-case for add/negate combination. Replace the zero in the negation
1306 // with the trailing add operand:
1307 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1308 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1309 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1310 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1311 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1312 return nullptr;
1313
1314 Value *N;
1315 if (True && match(FVal, m_Neg(m_Value(N)))) {
1316 Value *Sub = Builder.CreateSub(Z, N);
1317 return Builder.CreateSelect(Cond, True, Sub, I.getName());
1318 }
1319 if (False && match(TVal, m_Neg(m_Value(N)))) {
1320 Value *Sub = Builder.CreateSub(Z, N);
1321 return Builder.CreateSelect(Cond, Sub, False, I.getName());
1322 }
1323 return nullptr;
1324 };
1325
1326 if (LHSIsSelect && RHSIsSelect && A == D) {
1327 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1328 Cond = A;
1329 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1330 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1331
1332 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1333 if (False && !True)
1334 True = Builder.CreateBinOp(Opcode, B, E);
1335 else if (True && !False)
1336 False = Builder.CreateBinOp(Opcode, C, F);
1337 }
1338 } else if (LHSIsSelect && LHS->hasOneUse()) {
1339 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1340 Cond = A;
1341 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1342 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1343 if (Value *NewSel = foldAddNegate(B, C, RHS))
1344 return NewSel;
1345 } else if (RHSIsSelect && RHS->hasOneUse()) {
1346 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1347 Cond = D;
1348 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1349 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1350 if (Value *NewSel = foldAddNegate(E, F, LHS))
1351 return NewSel;
1352 }
1353
1354 if (!True || !False)
1355 return nullptr;
1356
1357 Value *SI = Builder.CreateSelect(Cond, True, False);
1358 SI->takeName(&I);
1359 return SI;
1360}
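// For illustration, a hypothetical case where both select arms simplify,
// assuming %s has a single use:
//   %s = select i1 %c, i32 7, i32 5
//   %r = add i32 %s, 3
// becomes
//   %r = select i1 %c, i32 10, i32 8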
1361
1362/// Freely adapt every user of V as-if V was changed to !V.
1363/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1365 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1366 for (User *U : make_early_inc_range(I->users())) {
1367 if (U == IgnoredUser)
1368 continue; // Don't consider this user.
1369 switch (cast<Instruction>(U)->getOpcode()) {
1370 case Instruction::Select: {
1371 auto *SI = cast<SelectInst>(U);
1372 SI->swapValues();
1373 SI->swapProfMetadata();
1374 break;
1375 }
1376 case Instruction::Br: {
1377 BranchInst *BI = cast<BranchInst>(U);
1378 BI->swapSuccessors(); // swaps prof metadata too
1379 if (BPI)
1381 break;
1382 }
1383 case Instruction::Xor:
1384 replaceInstUsesWith(cast<Instruction>(*U), I);
1385 // Add to worklist for DCE.
1386 addToWorklist(cast<Instruction>(U));
1387 break;
1388 default:
1389 llvm_unreachable("Got unexpected user - out of sync with "
1390 "canFreelyInvertAllUsersOf() ?");
1391 }
1392 }
1393}
1394
1395/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1396/// constant zero (which is the 'negate' form).
1397Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1398 Value *NegV;
1399 if (match(V, m_Neg(m_Value(NegV))))
1400 return NegV;
1401
1402 // Constants can be considered to be negated values if they can be folded.
1403 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1404 return ConstantExpr::getNeg(C);
1405
1406 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1407 if (C->getType()->getElementType()->isIntegerTy())
1408 return ConstantExpr::getNeg(C);
1409
1410 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1411 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1412 Constant *Elt = CV->getAggregateElement(i);
1413 if (!Elt)
1414 return nullptr;
1415
1416 if (isa<UndefValue>(Elt))
1417 continue;
1418
1419 if (!isa<ConstantInt>(Elt))
1420 return nullptr;
1421 }
1422 return ConstantExpr::getNeg(CV);
1423 }
1424
1425 // Negate integer vector splats.
1426 if (auto *CV = dyn_cast<Constant>(V))
1427 if (CV->getType()->isVectorTy() &&
1428 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1429 return ConstantExpr::getNeg(CV);
1430
1431 return nullptr;
1432}
1433
1434// Try to fold:
1435// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1436// -> ({s|u}itofp (int_binop x, y))
1437// 2) (fp_binop ({s|u}itofp x), FpC)
1438// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1439//
1440// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1441Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1442 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1444
1445 Type *FPTy = BO.getType();
1446 Type *IntTy = IntOps[0]->getType();
1447
1448 unsigned IntSz = IntTy->getScalarSizeInBits();
1449 // This is the maximum number of in-use bits of the integer for which the
1450 // int -> fp casts are exact.
1451 unsigned MaxRepresentableBits =
1453
1454 // Preserve the known number of leading bits. This can allow us to do trivial
1455 // nsw/nuw checks later on.
1456 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1457
1458 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1459 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1460 auto IsNonZero = [&](unsigned OpNo) -> bool {
1461 if (OpsKnown[OpNo].hasKnownBits() &&
1462 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1463 return true;
1464 return isKnownNonZero(IntOps[OpNo], SQ);
1465 };
1466
1467 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1468 // NB: This matches the impl in ValueTracking, we just try to use cached
1469 // knownbits here. If we ever start supporting WithCache for
1470 // `isKnownNonNegative`, change this to an explicit call.
1471 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1472 };
1473
1474 // Check if we know for certain that ({s|u}itofp op) is exact.
1475 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1476 // Can we treat this operand as the desired sign?
1477 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1478 !IsNonNeg(OpNo))
1479 return false;
1480
1481 // If fp precision >= bitwidth(op) then it's exact.
1482 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1483 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1484 // handled specially. We can't, however, increase the bound arbitrarily for
1485 // `sitofp` as for larger sizes, it won't sign extend.
1486 if (MaxRepresentableBits < IntSz) {
1487 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1488 // numSignBits(op).
1489 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1490 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1491 if (OpsFromSigned)
1492 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1493 // Finally for unsigned check that fp precision >= bitwidth(op) -
1494 // numLeadingZeros(op).
1495 else {
1496 NumUsedLeadingBits[OpNo] =
1497 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1498 }
1499 }
1500 // NB: We could also check if op is known to be a power of 2 or zero (which
1501 // will always be representable). It's unlikely, however, that if we are
1502 // unable to bound op in any way, we will be able to pass the overflow checks
1503 // later on.
1504
1505 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1506 return false;
1507 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1508 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1509 IsNonZero(OpNo);
1510 };
1511
1512 // If we have a constant rhs, see if we can losslessly convert it to an int.
1513 if (Op1FpC != nullptr) {
1514 // Signed + Mul req non-zero
1515 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1516 !match(Op1FpC, m_NonZeroFP()))
1517 return nullptr;
1518
1520 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1521 IntTy, DL);
1522 if (Op1IntC == nullptr)
1523 return nullptr;
1524 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1525 : Instruction::UIToFP,
1526 Op1IntC, FPTy, DL) != Op1FpC)
1527 return nullptr;
1528
1529 // First try to keep sign of cast the same.
1530 IntOps[1] = Op1IntC;
1531 }
1532
1533 // Ensure lhs/rhs integer types match.
1534 if (IntTy != IntOps[1]->getType())
1535 return nullptr;
1536
1537 if (Op1FpC == nullptr) {
1538 if (!IsValidPromotion(1))
1539 return nullptr;
1540 }
1541 if (!IsValidPromotion(0))
1542 return nullptr;
1543
1544 // Finally, we check that the integer version of the binop will not overflow.
1546 // Because of the precision check, we can often rule out overflows.
1547 bool NeedsOverflowCheck = true;
1548 // Try to conservatively rule out overflow based on the precision checks
1549 // already done.
1550 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1551 unsigned OverflowMaxCurBits =
1552 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1553 bool OutputSigned = OpsFromSigned;
1554 switch (BO.getOpcode()) {
1555 case Instruction::FAdd:
1556 IntOpc = Instruction::Add;
1557 OverflowMaxOutputBits += OverflowMaxCurBits;
1558 break;
1559 case Instruction::FSub:
1560 IntOpc = Instruction::Sub;
1561 OverflowMaxOutputBits += OverflowMaxCurBits;
1562 break;
1563 case Instruction::FMul:
1564 IntOpc = Instruction::Mul;
1565 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1566 break;
1567 default:
1568 llvm_unreachable("Unsupported binop");
1569 }
1570 // The precision check may have already ruled out overflow.
1571 if (OverflowMaxOutputBits < IntSz) {
1572 NeedsOverflowCheck = false;
1573 // We can bound the unsigned overflow from sub to an in-range signed value
1574 // (this is what allows us to avoid the overflow check for sub).
1575 if (IntOpc == Instruction::Sub)
1576 OutputSigned = true;
1577 }
1578
1579 // Precision check did not rule out overflow, so need to check.
1580 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1581 // `IntOps[...]` arguments to `KnownOps[...]`.
1582 if (NeedsOverflowCheck &&
1583 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1584 return nullptr;
1585
1586 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1587 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1588 IntBO->setHasNoSignedWrap(OutputSigned);
1589 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1590 }
1591 if (OutputSigned)
1592 return new SIToFPInst(IntBinOp, FPTy);
1593 return new UIToFPInst(IntBinOp, FPTy);
1594}
1595
1596// Try to fold:
1597// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1598// -> ({s|u}itofp (int_binop x, y))
1599// 2) (fp_binop ({s|u}itofp x), FpC)
1600// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1601Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1602 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1603 Constant *Op1FpC = nullptr;
1604 // Check for:
1605 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1606 // 2) (binop ({s|u}itofp x), FpC)
1607 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1608 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1609 return nullptr;
1610
1611 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1612 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1613 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1614 return nullptr;
1615
1616 // Cache KnownBits a bit to potentially save some analysis.
1617 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1618
1619 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1620 // different constraints depending on the sign of the cast.
1621 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1622 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1623 IntOps, Op1FpC, OpsKnown))
1624 return R;
1625 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1626 Op1FpC, OpsKnown);
1627}
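// For illustration, a hypothetical instance of case 1) above, assuming the
// known-bits analysis sees the masks so overflow can be ruled out:
//   %x  = and i32 %a, 65535
//   %y  = and i32 %b, 65535
//   %xf = uitofp i32 %x to float
//   %yf = uitofp i32 %y to float
//   %r  = fadd float %xf, %yf
// can become
//   %s = add nuw i32 %x, %y
//   %r = uitofp i32 %s to float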
1628
1629/// A binop with a constant operand and a sign-extended boolean operand may be
1630/// converted into a select of constants by applying the binary operation to
1631/// the constant with the two possible values of the extended boolean (0 or -1).
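/// For example:
///   %e = sext i1 %x to i32
///   %r = add i32 %e, 42
/// -->
///   %r = select i1 %x, i32 41, i32 42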
1632Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1633 // TODO: Handle non-commutative binop (constant is operand 0).
1634 // TODO: Handle zext.
1635 // TODO: Peek through 'not' of cast.
1636 Value *BO0 = BO.getOperand(0);
1637 Value *BO1 = BO.getOperand(1);
1638 Value *X;
1639 Constant *C;
1640 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1641 !X->getType()->isIntOrIntVectorTy(1))
1642 return nullptr;
1643
1644 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1645 Constant *Ones = Constant::getAllOnesValue(BO.getType());
1646 Constant *Zero = Constant::getNullValue(BO.getType());
1647 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1648 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1649 return SelectInst::Create(X, TVal, FVal);
1650}
1651
1652 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1653 bool IsTrueArm) {
1654 SmallVector<Value *> Ops;
1655 for (Value *Op : I.operands()) {
1656 Value *V = nullptr;
1657 if (Op == SI) {
1658 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1659 } else if (match(SI->getCondition(),
1662 m_Specific(Op), m_Value(V))) &&
1664 // Pass
1665 } else {
1666 V = Op;
1667 }
1668 Ops.push_back(V);
1669 }
1670
1671 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1672}
1673
1674 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1675 Value *NewOp, InstCombiner &IC) {
1676 Instruction *Clone = I.clone();
1677 Clone->replaceUsesOfWith(SI, NewOp);
1679 IC.InsertNewInstBefore(Clone, I.getIterator());
1680 return Clone;
1681}
1682
1683 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1684 bool FoldWithMultiUse) {
1685 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1686 if (!SI->hasOneUse() && !FoldWithMultiUse)
1687 return nullptr;
1688
1689 Value *TV = SI->getTrueValue();
1690 Value *FV = SI->getFalseValue();
1691
1692 // Bool selects with constant operands can be folded to logical ops.
1693 if (SI->getType()->isIntOrIntVectorTy(1))
1694 return nullptr;
1695
1696 // Test if an FCmpInst instruction is used exclusively by a select as
1697 // part of a minimum or maximum operation. If so, refrain from doing
1698 // any other folding. This helps out other analyses which understand
1699 // non-obfuscated minimum and maximum idioms. And in this case, at
1700 // least one of the comparison operands has at least one user besides
1701 // the compare (the select), which would often largely negate the
1702 // benefit of folding anyway.
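// A typical idiom left intact by this check:
//   %cmp = fcmp olt float %a, %b
//   %min = select i1 %cmp, float %a, float %b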
1703 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1704 if (CI->hasOneUse()) {
1705 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1706 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1707 return nullptr;
1708 }
1709 }
1710
1711 // Make sure that one of the select arms folds successfully.
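// E.g. for `mul %x, (select i1 %c, i32 0, i32 %y)` the true arm simplifies to
// 0, and the false arm is recreated below as `mul %x, %y`, yielding
// `select i1 %c, i32 0, i32 (mul %x, %y)`.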
1712 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1713 Value *NewFV =
1714 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1715 if (!NewTV && !NewFV)
1716 return nullptr;
1717
1718 // Create an instruction for the arm that did not fold.
1719 if (!NewTV)
1720 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1721 if (!NewFV)
1722 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1723 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1724}
1725
1726 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1727 Value *InValue, BasicBlock *InBB,
1728 const DataLayout &DL,
1729 const SimplifyQuery SQ) {
1730 // NB: It is a precondition of this transform that the operands be
1731 // phi translatable!
1732 SmallVector<Value *> Ops;
1733 for (Value *Op : I.operands()) {
1734 if (Op == PN)
1735 Ops.push_back(InValue);
1736 else
1737 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1738 }
1739
1740 // Don't consider the simplification successful if we get back a constant
1741 // expression. That's just an instruction in hiding.
1742 // Also reject the case where we simplify back to the phi node. We wouldn't
1743 // be able to remove it in that case.
1744 Value *NewVal = simplifyInstructionWithOperands(
1745 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1746 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1747 return NewVal;
1748
1749 // Check if incoming PHI value can be replaced with constant
1750 // based on implied condition.
1751 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1752 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1753 if (TerminatorBI && TerminatorBI->isConditional() &&
1754 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1755 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1756 std::optional<bool> ImpliedCond = isImpliedCondition(
1757 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1758 DL, LHSIsTrue);
1759 if (ImpliedCond)
1760 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1761 }
1762
1763 return nullptr;
1764}
1765
1766 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1767 bool AllowMultipleUses) {
1768 unsigned NumPHIValues = PN->getNumIncomingValues();
1769 if (NumPHIValues == 0)
1770 return nullptr;
1771
1772 // We normally only transform phis with a single use. However, if a PHI has
1773 // multiple uses and they are all the same operation, we can fold *all* of the
1774 // uses into the PHI.
1775 bool OneUse = PN->hasOneUse();
1776 bool IdenticalUsers = false;
1777 if (!AllowMultipleUses && !OneUse) {
1778 // Walk the use list for the instruction, comparing them to I.
1779 for (User *U : PN->users()) {
1780 Instruction *UI = cast<Instruction>(U);
1781 if (UI != &I && !I.isIdenticalTo(UI))
1782 return nullptr;
1783 }
1784 // Otherwise, we can replace *all* users with the new PHI we form.
1785 IdenticalUsers = true;
1786 }
1787
1788 // Check that all operands are phi-translatable.
1789 for (Value *Op : I.operands()) {
1790 if (Op == PN)
1791 continue;
1792
1793 // Non-instructions never require phi-translation.
1794 auto *I = dyn_cast<Instruction>(Op);
1795 if (!I)
1796 continue;
1797
1798 // Phi-translate can handle phi nodes in the same block.
1799 if (isa<PHINode>(I))
1800 if (I->getParent() == PN->getParent())
1801 continue;
1802
1803 // Operand dominates the block, no phi-translation necessary.
1804 if (DT.dominates(I, PN->getParent()))
1805 continue;
1806
1807 // Not phi-translatable, bail out.
1808 return nullptr;
1809 }
1810
1811 // Check to see whether the instruction can be folded into each phi operand.
1812 // If there is one operand that does not fold, remember the BB it is in.
1813 SmallVector<Value *> NewPhiValues;
1814 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1815 bool SeenNonSimplifiedInVal = false;
1816 for (unsigned i = 0; i != NumPHIValues; ++i) {
1817 Value *InVal = PN->getIncomingValue(i);
1818 BasicBlock *InBB = PN->getIncomingBlock(i);
1819
1820 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1821 NewPhiValues.push_back(NewVal);
1822 continue;
1823 }
1824
1825 // Handle some cases that can't be fully simplified, but where we know that
1826 // the two instructions will fold into one.
1827 auto WillFold = [&]() {
1828 if (!InVal->hasOneUser())
1829 return false;
1830
1831 // icmp of ucmp/scmp with constant will fold to icmp.
1832 const APInt *Ignored;
1833 if (isa<CmpIntrinsic>(InVal) &&
1834 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1835 return true;
1836
1837 // icmp eq zext(bool), 0 will fold to !bool.
1838 if (isa<ZExtInst>(InVal) &&
1839 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1840 match(&I,
1841 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_ZeroInt())))
1842 return true;
1843
1844 return false;
1845 };
1846
1847 if (WillFold()) {
1848 OpsToMoveUseToIncomingBB.push_back(i);
1849 NewPhiValues.push_back(nullptr);
1850 continue;
1851 }
1852
1853 if (!OneUse && !IdenticalUsers)
1854 return nullptr;
1855
1856 if (SeenNonSimplifiedInVal)
1857 return nullptr; // More than one non-simplified value.
1858 SeenNonSimplifiedInVal = true;
1859
1860 // If there is exactly one non-simplified value, we can insert a copy of the
1861 // operation in that block. However, if this is a critical edge, we would
1862 // be inserting the computation on some other paths (e.g. inside a loop).
1863 // Only do this if the pred block is unconditionally branching into the phi
1864 // block. Also, make sure that the pred block is not dead code.
1865 BranchInst *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1866 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1867 return nullptr;
1868
1869 NewPhiValues.push_back(nullptr);
1870 OpsToMoveUseToIncomingBB.push_back(i);
1871
1872 // If the InVal is an invoke at the end of the pred block, then we can't
1873 // insert a computation after it without breaking the edge.
1874 if (isa<InvokeInst>(InVal))
1875 if (cast<Instruction>(InVal)->getParent() == InBB)
1876 return nullptr;
1877
1878 // Do not push the operation across a loop backedge. This could result in
1879 // an infinite combine loop, and is generally non-profitable (especially
1880 // if the operation was originally outside the loop).
1881 if (isBackEdge(InBB, PN->getParent()))
1882 return nullptr;
1883 }
1884
1885 // Clone the instruction that uses the phi node and move it into the incoming
1886 // BB because we know that the next iteration of InstCombine will simplify it.
1887 SmallDenseMap<BasicBlock *, Instruction *> Clones;
1888 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1889 Value *Op = PN->getIncomingValue(OpIndex);
1890 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1891
1892 Instruction *Clone = Clones.lookup(OpBB);
1893 if (!Clone) {
1894 Clone = I.clone();
1895 for (Use &U : Clone->operands()) {
1896 if (U == PN)
1897 U = Op;
1898 else
1899 U = U->DoPHITranslation(PN->getParent(), OpBB);
1900 }
1901 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1902 Clones.insert({OpBB, Clone});
1903 }
1904
1905 NewPhiValues[OpIndex] = Clone;
1906 }
1907
1908 // Okay, we can do the transformation: create the new PHI node.
1909 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1910 InsertNewInstBefore(NewPN, PN->getIterator());
1911 NewPN->takeName(PN);
1912 NewPN->setDebugLoc(PN->getDebugLoc());
1913
1914 for (unsigned i = 0; i != NumPHIValues; ++i)
1915 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
1916
1917 if (IdenticalUsers) {
1918 for (User *U : make_early_inc_range(PN->users())) {
1919 Instruction *User = cast<Instruction>(U);
1920 if (User == &I)
1921 continue;
1922 replaceInstUsesWith(*User, NewPN);
1924 }
1925 OneUse = true;
1926 }
1927
1928 if (OneUse) {
1929 replaceAllDbgUsesWith(const_cast<PHINode &>(*PN),
1930 const_cast<PHINode &>(*NewPN),
1931 const_cast<PHINode &>(*PN), DT);
1932 }
1933 return replaceInstUsesWith(I, NewPN);
1934}
1935
1936 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1937 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1938 // we are guarding against replicating the binop in >1 predecessor.
1939 // This could miss matching a phi with 2 constant incoming values.
1940 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1941 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1942 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1943 Phi0->getNumOperands() != Phi1->getNumOperands())
1944 return nullptr;
1945
1946 // TODO: Remove the restriction for binop being in the same block as the phis.
1947 if (BO.getParent() != Phi0->getParent() ||
1948 BO.getParent() != Phi1->getParent())
1949 return nullptr;
1950
1951 // Fold if there is at least one specific constant value in phi0's or phi1's
1952 // incoming values that comes from the same block, and this specific constant
1953 // value can be used to optimize the binary operator.
1954 // For example:
1955 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
1956 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
1957 // %add = add i32 %phi0, %phi1
1958 // ==>
1959 // %add = phi i32 [%j, %bb0], [%i, %bb1]
1960 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
1961 /*AllowRHSConstant*/ false);
1962 if (C) {
1963 SmallVector<Value *, 4> NewIncomingValues;
1964 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
1965 auto &Phi0Use = std::get<0>(T);
1966 auto &Phi1Use = std::get<1>(T);
1967 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1968 return false;
1969 Value *Phi0UseV = Phi0Use.get();
1970 Value *Phi1UseV = Phi1Use.get();
1971 if (Phi0UseV == C)
1972 NewIncomingValues.push_back(Phi1UseV);
1973 else if (Phi1UseV == C)
1974 NewIncomingValues.push_back(Phi0UseV);
1975 else
1976 return false;
1977 return true;
1978 };
1979
1980 if (all_of(zip(Phi0->operands(), Phi1->operands()),
1981 CanFoldIncomingValuePair)) {
1982 PHINode *NewPhi =
1983 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
1984 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
1985 "The number of collected incoming values should equal the number "
1986 "of the original PHINode operands!");
1987 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
1988 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
1989 return NewPhi;
1990 }
1991 }
1992
1993 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1994 return nullptr;
1995
1996 // Match a pair of incoming constants for one of the predecessor blocks.
1997 BasicBlock *ConstBB, *OtherBB;
1998 Constant *C0, *C1;
1999 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2000 ConstBB = Phi0->getIncomingBlock(0);
2001 OtherBB = Phi0->getIncomingBlock(1);
2002 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2003 ConstBB = Phi0->getIncomingBlock(1);
2004 OtherBB = Phi0->getIncomingBlock(0);
2005 } else {
2006 return nullptr;
2007 }
2008 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2009 return nullptr;
2010
2011 // The block that we are hoisting to must reach here unconditionally.
2012 // Otherwise, we could be speculatively executing an expensive or
2013 // non-speculative op.
2014 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2015 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2016 !DT.isReachableFromEntry(OtherBB))
2017 return nullptr;
2018
2019 // TODO: This check could be tightened to only apply to binops (div/rem) that
2020 // are not safe to speculatively execute. But that could allow hoisting
2021 // potentially expensive instructions (fdiv for example).
2022 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2023 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2024 return nullptr;
2025
2026 // Fold constants for the predecessor block with constant incoming values.
2027 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2028 if (!NewC)
2029 return nullptr;
2030
2031 // Make a new binop in the predecessor block with the non-constant incoming
2032 // values.
2033 Builder.SetInsertPoint(PredBlockBranch);
2034 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2035 Phi0->getIncomingValueForBlock(OtherBB),
2036 Phi1->getIncomingValueForBlock(OtherBB));
2037 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2038 NotFoldedNewBO->copyIRFlags(&BO);
2039
2040 // Replace the binop with a phi of the new values. The old phis are dead.
2041 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2042 NewPhi->addIncoming(NewBO, OtherBB);
2043 NewPhi->addIncoming(NewC, ConstBB);
2044 return NewPhi;
2045}
2046
2047 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2048 if (!isa<Constant>(I.getOperand(1)))
2049 return nullptr;
2050
2051 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2052 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2053 return NewSel;
2054 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2055 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2056 return NewPhi;
2057 }
2058 return nullptr;
2059}
2060
2061 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2062 // If this GEP has only 0 indices, it is the same pointer as
2063 // Src. If Src is not a trivial GEP too, don't combine
2064 // the indices.
2065 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2066 !Src.hasOneUse())
2067 return false;
2068 return true;
2069}
2070
2071 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2072 if (!isa<VectorType>(Inst.getType()))
2073 return nullptr;
2074
2075 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2076 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2077 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2078 cast<VectorType>(Inst.getType())->getElementCount());
2079 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2080 cast<VectorType>(Inst.getType())->getElementCount());
2081
2082 // If both operands of the binop are vector concatenations, then perform the
2083 // narrow binop on each pair of the source operands followed by concatenation
2084 // of the results.
2085 Value *L0, *L1, *R0, *R1;
2086 ArrayRef<int> Mask;
2087 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2088 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2089 LHS->hasOneUse() && RHS->hasOneUse() &&
2090 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2091 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2092 // This transform does not have the speculative execution constraint as
2093 // below because the shuffle is a concatenation. The new binops are
2094 // operating on exactly the same elements as the existing binop.
2095 // TODO: We could ease the mask requirement to allow different undef lanes,
2096 // but that requires an analysis of the binop-with-undef output value.
2097 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2098 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2099 BO->copyIRFlags(&Inst);
2100 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2101 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2102 BO->copyIRFlags(&Inst);
2103 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2104 }
2105
2106 auto createBinOpReverse = [&](Value *X, Value *Y) {
2107 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2108 if (auto *BO = dyn_cast<BinaryOperator>(V))
2109 BO->copyIRFlags(&Inst);
2110 Module *M = Inst.getModule();
2111 Function *F = Intrinsic::getOrInsertDeclaration(
2112 M, Intrinsic::vector_reverse, V->getType());
2113 return CallInst::Create(F, V);
2114 };
2115
2116 // NOTE: Reverse shuffles don't require the speculative execution protection
2117 // below because they don't affect which lanes take part in the computation.
2118
2119 Value *V1, *V2;
2120 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2121 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2122 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2123 (LHS->hasOneUse() || RHS->hasOneUse() ||
2124 (LHS == RHS && LHS->hasNUses(2))))
2125 return createBinOpReverse(V1, V2);
2126
2127 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2128 if (LHS->hasOneUse() && isSplatValue(RHS))
2129 return createBinOpReverse(V1, RHS);
2130 }
2131 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2132 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2133 return createBinOpReverse(LHS, V2);
2134
2135 // It may not be safe to reorder shuffles and things like div, urem, etc.
2136 // because we may trap when executing those ops on unknown vector elements.
2137 // See PR20059.
2138 if (!isSafeToSpeculativelyExecute(&Inst))
2139 return nullptr;
2140
2141 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2142 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2143 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2144 BO->copyIRFlags(&Inst);
2145 return new ShuffleVectorInst(XY, M);
2146 };
2147
2148 // If both arguments of the binary operation are shuffles that use the same
2149 // mask and shuffle within a single vector, move the shuffle after the binop.
2150 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2151 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2152 V1->getType() == V2->getType() &&
2153 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2154 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2155 return createBinOpShuffle(V1, V2, Mask);
2156 }
2157
2158 // If both arguments of a commutative binop are select-shuffles that use the
2159 // same mask with commuted operands, the shuffles are unnecessary.
2160 if (Inst.isCommutative() &&
2161 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2162 match(RHS,
2163 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2164 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2165 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2166 // TODO: Allow shuffles that contain undefs in the mask?
2167 // That is legal, but it reduces undef knowledge.
2168 // TODO: Allow arbitrary shuffles by shuffling after binop?
2169 // That might be legal, but we have to deal with poison.
2170 if (LShuf->isSelect() &&
2171 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2172 RShuf->isSelect() &&
2173 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2174 // Example:
2175 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2176 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2177 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2178 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2179 NewBO->copyIRFlags(&Inst);
2180 return NewBO;
2181 }
2182 }
2183
2184 // If one argument is a shuffle within one vector and the other is a constant,
2185 // try moving the shuffle after the binary operation. This canonicalization
2186 // intends to move shuffles closer to other shuffles and binops closer to
2187 // other binops, so they can be folded. It may also enable demanded elements
2188 // transforms.
2189 Constant *C;
2190 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
2191 if (InstVTy &&
2192 match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2193 m_Mask(Mask))),
2194 m_ImmConstant(C))) &&
2195 cast<FixedVectorType>(V1->getType())->getNumElements() <=
2196 InstVTy->getNumElements()) {
2197 assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
2198 "Shuffle should not change scalar type");
2199
2200 // Find constant NewC that has property:
2201 // shuffle(NewC, ShMask) = C
2202 // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>)
2203 // reorder is not possible. A 1-to-1 mapping is not required. Example:
2204 // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
2205 bool ConstOp1 = isa<Constant>(RHS);
2206 ArrayRef<int> ShMask = Mask;
2207 unsigned SrcVecNumElts =
2208 cast<FixedVectorType>(V1->getType())->getNumElements();
2209 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2210 SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, PoisonScalar);
2211 bool MayChange = true;
2212 unsigned NumElts = InstVTy->getNumElements();
2213 for (unsigned I = 0; I < NumElts; ++I) {
2214 Constant *CElt = C->getAggregateElement(I);
2215 if (ShMask[I] >= 0) {
2216 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2217 Constant *NewCElt = NewVecC[ShMask[I]];
2218 // Bail out if:
2219 // 1. The constant vector contains a constant expression.
2220 // 2. The shuffle needs an element of the constant vector that can't
2221 // be mapped to a new constant vector.
2222 // 3. This is a widening shuffle that copies elements of V1 into the
2223 // extended elements (extending with poison is allowed).
2224 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2225 I >= SrcVecNumElts) {
2226 MayChange = false;
2227 break;
2228 }
2229 NewVecC[ShMask[I]] = CElt;
2230 }
2231 // If this is a widening shuffle, we must be able to extend with poison
2232 // elements. If the original binop does not produce a poison in the high
2233 // lanes, then this transform is not safe.
2234 // Similarly for poison lanes due to the shuffle mask, we can only
2235 // transform binops that preserve poison.
2236 // TODO: We could shuffle those non-poison constant values into the
2237 // result by using a constant vector (rather than a poison vector)
2238 // as operand 1 of the new binop, but that might be too aggressive
2239 // for target-independent shuffle creation.
2240 if (I >= SrcVecNumElts || ShMask[I] < 0) {
2241 Constant *MaybePoison =
2242 ConstOp1
2243 ? ConstantFoldBinaryOpOperands(Opcode, PoisonScalar, CElt, DL)
2244 : ConstantFoldBinaryOpOperands(Opcode, CElt, PoisonScalar, DL);
2245 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2246 MayChange = false;
2247 break;
2248 }
2249 }
2250 }
2251 if (MayChange) {
2252 Constant *NewC = ConstantVector::get(NewVecC);
2253 // It may not be safe to execute a binop on a vector with poison elements
2254 // because the entire instruction can be folded to undef or create poison
2255 // that did not exist in the original code.
2256 // TODO: The shift case should not be necessary.
2257 if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
2258 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2259
2260 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2261 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2262 Value *NewLHS = ConstOp1 ? V1 : NewC;
2263 Value *NewRHS = ConstOp1 ? NewC : V1;
2264 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2265 }
2266 }
2267
2268 // Try to reassociate to sink a splat shuffle after a binary operation.
2269 if (Inst.isAssociative() && Inst.isCommutative()) {
2270 // Canonicalize shuffle operand as LHS.
2271 if (isa<ShuffleVectorInst>(RHS))
2272 std::swap(LHS, RHS);
2273
2274 Value *X;
2275 ArrayRef<int> MaskC;
2276 int SplatIndex;
2277 Value *Y, *OtherOp;
2278 if (!match(LHS,
2279 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2280 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2281 X->getType() != Inst.getType() ||
2282 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2283 return nullptr;
2284
2285 // FIXME: This may not be safe if the analysis allows undef elements. By
2286 // moving 'Y' before the splat shuffle, we are implicitly assuming
2287 // that it is not undef/poison at the splat index.
2288 if (isSplatValue(OtherOp, SplatIndex)) {
2289 std::swap(Y, OtherOp);
2290 } else if (!isSplatValue(Y, SplatIndex)) {
2291 return nullptr;
2292 }
2293
2294 // X and Y are splatted values, so perform the binary operation on those
2295 // values followed by a splat followed by the 2nd binary operation:
2296 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2297 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2298 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2299 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2300 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2301
2302 // Intersect FMF on both new binops. Other (poison-generating) flags are
2303 // dropped to be safe.
2304 if (isa<FPMathOperator>(R)) {
2305 R->copyFastMathFlags(&Inst);
2306 R->andIRFlags(RHS);
2307 }
2308 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2309 NewInstBO->copyIRFlags(R);
2310 return R;
2311 }
2312
2313 return nullptr;
2314}
2315
2316 /// Try to narrow the width of a binop if at least 1 operand is an extend of
2317 /// a value. This requires a potentially expensive known bits check to make
2318/// sure the narrow op does not overflow.
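/// For example, when known bits prove the narrow add cannot wrap:
///   %zx = zext i8 %x to i32
///   %zy = zext i8 %y to i32
///   %a  = add i32 %zx, %zy
/// -->
///   %n  = add nuw i8 %x, %y
///   %a  = zext i8 %n to i32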
2319Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2320 // We need at least one extended operand.
2321 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2322
2323 // If this is a sub, we swap the operands since we always want an extension
2324 // on the RHS. The LHS can be an extension or a constant.
2325 if (BO.getOpcode() == Instruction::Sub)
2326 std::swap(Op0, Op1);
2327
2328 Value *X;
2329 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2330 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2331 return nullptr;
2332
2333 // If both operands are the same extension from the same source type and we
2334 // can eliminate at least one (hasOneUse), this might work.
2335 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2336 Value *Y;
2337 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2338 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2339 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2340 // If that did not match, see if we have a suitable constant operand.
2341 // Truncating and extending must produce the same constant.
2342 Constant *WideC;
2343 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2344 return nullptr;
2345 Constant *NarrowC = getLosslessTrunc(WideC, X->getType(), CastOpc);
2346 if (!NarrowC)
2347 return nullptr;
2348 Y = NarrowC;
2349 }
2350
2351 // Swap back now that we found our operands.
2352 if (BO.getOpcode() == Instruction::Sub)
2353 std::swap(X, Y);
2354
2355 // Both operands have narrow versions. Last step: the math must not overflow
2356 // in the narrow width.
2357 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2358 return nullptr;
2359
2360 // bo (ext X), (ext Y) --> ext (bo X, Y)
2361 // bo (ext X), C --> ext (bo X, C')
2362 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2363 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2364 if (IsSext)
2365 NewBinOp->setHasNoSignedWrap();
2366 else
2367 NewBinOp->setHasNoUnsignedWrap();
2368 }
2369 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2370}
2371
2372/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2373/// transform.
2374 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2375 GEPOperator &GEP2) {
2376 return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2377}
2378
2379/// Thread a GEP operation with constant indices through the constant true/false
2380/// arms of a select.
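/// For example (with @A and @B standing for arbitrary global constants):
///   %p = select i1 %c, ptr @A, ptr @B
///   %g = getelementptr inbounds i32, ptr %p, i64 4
/// -->
///   %g = select i1 %c, ptr getelementptr inbounds (i32, ptr @A, i64 4),
///                      ptr getelementptr inbounds (i32, ptr @B, i64 4)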
2381 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2382 InstCombiner::BuilderTy &Builder) {
2383 if (!GEP.hasAllConstantIndices())
2384 return nullptr;
2385
2386 Instruction *Sel;
2387 Value *Cond;
2388 Constant *TrueC, *FalseC;
2389 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2390 !match(Sel,
2391 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2392 return nullptr;
2393
2394 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2395 // Propagate 'inbounds' and metadata from existing instructions.
2396 // Note: using IRBuilder to create the constants for efficiency.
2397 SmallVector<Value *, 4> IndexC(GEP.indices());
2398 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2399 Type *Ty = GEP.getSourceElementType();
2400 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2401 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2402 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2403}
2404
2405// Canonicalization:
2406// gep T, (gep i8, base, C1), (Index + C2) into
2407// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
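// For example, with T = i32 (sizeof(T) = 4), C1 = 8 and C2 = 3:
//   %p = getelementptr i8, ptr %base, i64 8
//   %i = add i64 %idx, 3
//   %g = getelementptr i32, ptr %p, i64 %i
// -->
//   %q = getelementptr i8, ptr %base, i64 20
//   %g = getelementptr i32, ptr %q, i64 %idx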
2408 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2409 GEPOperator *Src,
2410 InstCombinerImpl &IC) {
2411 if (GEP.getNumIndices() != 1)
2412 return nullptr;
2413 auto &DL = IC.getDataLayout();
2414 Value *Base;
2415 const APInt *C1;
2416 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2417 return nullptr;
2418 Value *VarIndex;
2419 const APInt *C2;
2420 Type *PtrTy = Src->getType()->getScalarType();
2421 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2422 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2423 return nullptr;
2424 if (C1->getBitWidth() != IndexSizeInBits ||
2425 C2->getBitWidth() != IndexSizeInBits)
2426 return nullptr;
2427 Type *BaseType = GEP.getSourceElementType();
2428 if (isa<ScalableVectorType>(BaseType))
2429 return nullptr;
2430 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2431 APInt NewOffset = TypeSize * *C2 + *C1;
2432 if (NewOffset.isZero() ||
2433 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2434 Value *GEPConst =
2435 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
2436 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
2437 }
2438
2439 return nullptr;
2440}
2441
2442 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2443 GEPOperator *Src) {
2444 // Combine Indices - If the source pointer to this getelementptr instruction
2445 // is a getelementptr instruction with matching element type, combine the
2446 // indices of the two getelementptr instructions into a single instruction.
2447 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2448 return nullptr;
2449
2450 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2451 return I;
2452
2453 // For constant GEPs, use a more general offset-based folding approach.
2454 Type *PtrTy = Src->getType()->getScalarType();
2455 if (GEP.hasAllConstantIndices() &&
2456 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2457 // Split Src into a variable part and a constant suffix.
2458 gep_type_iterator GTI = gep_type_begin(*Src);
2459 Type *BaseType = GTI.getIndexedType();
2460 bool IsFirstType = true;
2461 unsigned NumVarIndices = 0;
2462 for (auto Pair : enumerate(Src->indices())) {
2463 if (!isa<ConstantInt>(Pair.value())) {
2464 BaseType = GTI.getIndexedType();
2465 IsFirstType = false;
2466 NumVarIndices = Pair.index() + 1;
2467 }
2468 ++GTI;
2469 }
2470
2471 // Determine the offset for the constant suffix of Src.
2472 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
2473 if (NumVarIndices != Src->getNumIndices()) {
2474 // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2475 if (BaseType->isScalableTy())
2476 return nullptr;
2477
2478 SmallVector<Value *> ConstantIndices;
2479 if (!IsFirstType)
2480 ConstantIndices.push_back(
2482 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2483 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2484 }
2485
2486 // Add the offset for GEP (which is fully constant).
2487 if (!GEP.accumulateConstantOffset(DL, Offset))
2488 return nullptr;
2489
2490 // Convert the total offset back into indices.
2491 SmallVector<APInt> ConstIndices =
2492 DL.getGEPIndicesForOffset(BaseType, Offset);
2493 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2494 return nullptr;
2495
2496 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2497 SmallVector<Value *> Indices;
2498 append_range(Indices, drop_end(Src->indices(),
2499 Src->getNumIndices() - NumVarIndices));
2500 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2501 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2502 // Even if the total offset is inbounds, we may end up representing it
2503 // by first performing a larger negative offset, and then a smaller
2504 // positive one. The large negative offset might go out of bounds. Only
2505 // preserve inbounds if all signs are the same.
2506 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2507 NW = NW.withoutNoUnsignedSignedWrap();
2508 if (!Idx.isNonNegative())
2509 NW = NW.withoutNoUnsignedWrap();
2510 }
2511
2512 return replaceInstUsesWith(
2513 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2514 Indices, "", NW));
2515 }
2516
2517 if (Src->getResultElementType() != GEP.getSourceElementType())
2518 return nullptr;
2519
2520 SmallVector<Value*, 8> Indices;
2521
2522 // Find out whether the last index in the source GEP is a sequential idx.
2523 bool EndsWithSequential = false;
2524 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2525 I != E; ++I)
2526 EndsWithSequential = I.isSequential();
2527
2528 // Can we combine the two pointer arithmetics offsets?
2529 if (EndsWithSequential) {
2530 // Replace: gep (gep %P, long B), long A, ...
2531 // With: T = long A+B; gep %P, T, ...
2532 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2533 Value *GO1 = GEP.getOperand(1);
2534
2535 // If they aren't the same type, then the input hasn't been processed
2536 // by the loop above yet (which canonicalizes sequential index types to
2537 // intptr_t). Just avoid transforming this until the input has been
2538 // normalized.
2539 if (SO1->getType() != GO1->getType())
2540 return nullptr;
2541
2542 Value *Sum =
2543 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2544 // Only do the combine when we are sure the cost after the
2545 // merge is never more than that before the merge.
2546 if (Sum == nullptr)
2547 return nullptr;
2548
2549 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2550 Indices.push_back(Sum);
2551 Indices.append(GEP.op_begin()+2, GEP.op_end());
2552 } else if (isa<Constant>(*GEP.idx_begin()) &&
2553 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2554 Src->getNumOperands() != 1) {
2555 // Otherwise we can do the fold if the first index of the GEP is a zero
2556 Indices.append(Src->op_begin()+1, Src->op_end());
2557 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2558 }
2559
2560 if (!Indices.empty())
2561 return replaceInstUsesWith(
2562 GEP, Builder.CreateGEP(
2563 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2564 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2565
2566 return nullptr;
2567}
2568
2569 Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2570 BuilderTy *Builder,
2571 bool &DoesConsume, unsigned Depth) {
2572 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2573 // ~(~(X)) -> X.
2574 Value *A, *B;
2575 if (match(V, m_Not(m_Value(A)))) {
2576 DoesConsume = true;
2577 return A;
2578 }
2579
2580 Constant *C;
2581 // Constants can be considered to be not'ed values.
2582 if (match(V, m_ImmConstant(C)))
2583 return ConstantExpr::getNot(C);
2584
2585 if (Depth++ >= MaxAnalysisRecursionDepth)
2586 return nullptr;
2587
2588 // The rest of the cases require that we invert all uses so don't bother
2589 // doing the analysis if we know we can't use the result.
2590 if (!WillInvertAllUses)
2591 return nullptr;
2592
2593 // Compares can be inverted if all of their uses are being modified to use
2594 // the ~V.
2595 if (auto *I = dyn_cast<CmpInst>(V)) {
2596 if (Builder != nullptr)
2597 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2598 I->getOperand(1));
2599 return NonNull;
2600 }
2601
2602 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2603 // `(-1 - B) - A` if we are willing to invert all of the uses.
2604 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2605 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2606 DoesConsume, Depth))
2607 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2608 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2609 DoesConsume, Depth))
2610 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2611 return nullptr;
2612 }
2613
2614 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2615 // into `A ^ B` if we are willing to invert all of the uses.
2616 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2617 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2618 DoesConsume, Depth))
2619 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2620 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2621 DoesConsume, Depth))
2622 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2623 return nullptr;
2624 }
2625
2626 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2627 // `A + (-1 - B)` if we are willing to invert all of the uses.
2628 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2629 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2630 DoesConsume, Depth))
2631 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2632 return nullptr;
2633 }
2634
2635 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2636 // into `A s>> B` if we are willing to invert all of the uses.
2637 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2638 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2639 DoesConsume, Depth))
2640 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2641 return nullptr;
2642 }
2643
2644 Value *Cond;
2645 // LogicOps are special in that we canonicalize them at the cost of an
2646 // instruction.
2647 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2648 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2649 // Selects/min/max with invertible operands are freely invertible
2650 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2651 bool LocalDoesConsume = DoesConsume;
2652 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2653 LocalDoesConsume, Depth))
2654 return nullptr;
2655 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2656 LocalDoesConsume, Depth)) {
2657 DoesConsume = LocalDoesConsume;
2658 if (Builder != nullptr) {
2659 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2660 DoesConsume, Depth);
2661 assert(NotB != nullptr &&
2662 "Unable to build inverted value for known freely invertable op");
2663 if (auto *II = dyn_cast<IntrinsicInst>(V))
2664 return Builder->CreateBinaryIntrinsic(
2665 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2666 return Builder->CreateSelect(Cond, NotA, NotB);
2667 }
2668 return NonNull;
2669 }
2670 }
2671
2672 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2673 bool LocalDoesConsume = DoesConsume;
2674 SmallVector<std::pair<Value *, BasicBlock *>> IncomingValues;
2675 for (Use &U : PN->operands()) {
2676 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2677 Value *NewIncomingVal = getFreelyInvertedImpl(
2678 U.get(), /*WillInvertAllUses=*/false,
2679 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2680 if (NewIncomingVal == nullptr)
2681 return nullptr;
2682 // Make sure that we can safely erase the original PHI node.
2683 if (NewIncomingVal == V)
2684 return nullptr;
2685 if (Builder != nullptr)
2686 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2687 }
2688
2689 DoesConsume = LocalDoesConsume;
2690 if (Builder != nullptr) {
2693 PHINode *NewPN =
2694 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2695 for (auto [Val, Pred] : IncomingValues)
2696 NewPN->addIncoming(Val, Pred);
2697 return NewPN;
2698 }
2699 return NonNull;
2700 }
2701
2702 if (match(V, m_SExtLike(m_Value(A)))) {
2703 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2704 DoesConsume, Depth))
2705 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
2706 return nullptr;
2707 }
2708
2709 if (match(V, m_Trunc(m_Value(A)))) {
2710 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2711 DoesConsume, Depth))
2712 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
2713 return nullptr;
2714 }
2715
2716 // De Morgan's Laws:
2717 // (~(A | B)) -> (~A & ~B)
2718 // (~(A & B)) -> (~A | ~B)
2719 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
2720 bool IsLogical, Value *A,
2721 Value *B) -> Value * {
2722 bool LocalDoesConsume = DoesConsume;
2723 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
2724 LocalDoesConsume, Depth))
2725 return nullptr;
2726 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2727 LocalDoesConsume, Depth)) {
2728 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2729 LocalDoesConsume, Depth);
2730 DoesConsume = LocalDoesConsume;
2731 if (IsLogical)
2732 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
2733 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
2734 }
2735
2736 return nullptr;
2737 };
2738
2739 if (match(V, m_Or(m_Value(A), m_Value(B))))
2740 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
2741 B);
2742
2743 if (match(V, m_And(m_Value(A), m_Value(B))))
2744 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
2745 B);
2746
2747 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
2748 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
2749 B);
2750
2751 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
2752 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
2753 B);
2754
2755 return nullptr;
2756}
2757
2758/// Return true if we should canonicalize the gep to an i8 ptradd.
2759 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
2760 Value *PtrOp = GEP.getOperand(0);
2761 Type *GEPEltType = GEP.getSourceElementType();
2762 if (GEPEltType->isIntegerTy(8))
2763 return false;
2764
2765 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
2766 // intrinsic. This has better support in BasicAA.
2767 if (GEPEltType->isScalableTy())
2768 return true;
2769
2770 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
2771 // together.
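// For example (i32 has an alloc size of 4 bytes):
//   %o = mul i64 %i, 12
//   %g = getelementptr i32, ptr %p, i64 %o
// -->
//   %o2 = mul i64 %i, 48
//   %g  = getelementptr i8, ptr %p, i64 %o2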
2772 if (GEP.getNumIndices() == 1 &&
2773 match(GEP.getOperand(1),
2774 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
2775 m_Shl(m_Value(), m_ConstantInt()))))
2776 return true;
2777
2778 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
2779 // possibly be merged together.
2780 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
2781 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
2782 any_of(GEP.indices(), [](Value *V) {
2783 const APInt *C;
2784 return match(V, m_APInt(C)) && !C->isZero();
2785 });
2786}
2787
2789 IRBuilderBase &Builder) {
2790 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2791 if (!Op1)
2792 return nullptr;
2793
2794 // Don't fold a GEP into itself through a PHI node. This can only happen
2795 // through the back-edge of a loop. Folding a GEP into itself means that
2796 // the value of the previous iteration needs to be stored in the meantime,
2797 // thus requiring an additional register variable to be live, but not
2798 // actually achieving anything (the GEP still needs to be executed once per
2799 // loop iteration).
2800 if (Op1 == &GEP)
2801 return nullptr;
2802 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
2803
2804 int DI = -1;
2805
2806 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2807 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2808 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2809 Op1->getSourceElementType() != Op2->getSourceElementType())
2810 return nullptr;
2811
2812 // As for Op1 above, don't try to fold a GEP into itself.
2813 if (Op2 == &GEP)
2814 return nullptr;
2815
2816 // Keep track of the type as we walk the GEP.
2817 Type *CurTy = nullptr;
2818
2819 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2820 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2821 return nullptr;
2822
2823 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2824 if (DI == -1) {
2825 // We have not seen any differences yet in the GEPs feeding the
2826 // PHI yet, so we record this one if it is allowed to be a
2827 // variable.
2828
2829 // The first two arguments can vary for any GEP, the rest have to be
2830 // static for struct slots
2831 if (J > 1) {
2832 assert(CurTy && "No current type?");
2833 if (CurTy->isStructTy())
2834 return nullptr;
2835 }
2836
2837 DI = J;
2838 } else {
2839 // The GEP is different by more than one input. While this could be
2840 // extended to support GEPs that vary by more than one variable, it
2841 // doesn't make sense since it greatly increases the complexity and
2842 // would result in an R+R+R addressing mode which no backend
2843 // directly supports and would need to be broken into several
2844 // simpler instructions anyway.
2845 return nullptr;
2846 }
2847 }
2848
2849 // Sink down a layer of the type for the next iteration.
2850 if (J > 0) {
2851 if (J == 1) {
2852 CurTy = Op1->getSourceElementType();
2853 } else {
2854 CurTy =
2855 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2856 }
2857 }
2858 }
2859
2860 NW &= Op2->getNoWrapFlags();
2861 }
2862
2863 // If not all GEPs are identical we'll have to create a new PHI node.
2864 // Check that the old PHI node has only one use so that it will get
2865 // removed.
2866 if (DI != -1 && !PN->hasOneUse())
2867 return nullptr;
2868
2869 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2870 NewGEP->setNoWrapFlags(NW);
2871
2872 if (DI == -1) {
2873 // All the GEPs feeding the PHI are identical. Clone one down into our
2874 // BB so that it can be merged with the current GEP.
2875 } else {
2876 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2877 // into the current block so it can be merged, and create a new PHI to
2878 // set that index.
2879 PHINode *NewPN;
2880 {
2881 IRBuilderBase::InsertPointGuard Guard(Builder);
2882 Builder.SetInsertPoint(PN);
2883 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2884 PN->getNumOperands());
2885 }
2886
2887 for (auto &I : PN->operands())
2888 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2889 PN->getIncomingBlock(I));
2890
2891 NewGEP->setOperand(DI, NewPN);
2892 }
2893
2894 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
2895 return NewGEP;
2896}
2897
2898 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2899 Value *PtrOp = GEP.getOperand(0);
2900 SmallVector<Value *, 8> Indices(GEP.indices());
2901 Type *GEPType = GEP.getType();
2902 Type *GEPEltType = GEP.getSourceElementType();
2903 if (Value *V =
2904 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
2905 SQ.getWithInstruction(&GEP)))
2906 return replaceInstUsesWith(GEP, V);
2907
2908 // For vector geps, use the generic demanded vector support.
2909 // Skip if GEP return type is scalable. The number of elements is unknown at
2910 // compile-time.
2911 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2912 auto VWidth = GEPFVTy->getNumElements();
2913 APInt PoisonElts(VWidth, 0);
2914 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2915 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2916 PoisonElts)) {
2917 if (V != &GEP)
2918 return replaceInstUsesWith(GEP, V);
2919 return &GEP;
2920 }
2921
2922 // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2923 // possible (decide on canonical form for pointer broadcast), 3) exploit
2924 // undef elements to decrease demanded bits
2925 }
2926
2927 // Eliminate unneeded casts for indices, and replace indices which displace
2928 // by multiples of a zero size type with zero.
2929 bool MadeChange = false;
2930
2931 // Index width may not be the same width as pointer width.
2932 // Data layout chooses the right type based on supported integer types.
2933 Type *NewScalarIndexTy =
2934 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2935
2936 gep_type_iterator GTI = gep_type_begin(GEP);
2937 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2938 ++I, ++GTI) {
2939 // Skip indices into struct types.
2940 if (GTI.isStruct())
2941 continue;
2942
2943 Type *IndexTy = (*I)->getType();
2944 Type *NewIndexType =
2945 IndexTy->isVectorTy()
2946 ? VectorType::get(NewScalarIndexTy,
2947 cast<VectorType>(IndexTy)->getElementCount())
2948 : NewScalarIndexTy;
2949
2950 // If the element type has zero size then any index over it is equivalent
2951 // to an index of zero, so replace it with zero if it is not zero already.
2952 Type *EltTy = GTI.getIndexedType();
2953 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2954 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2955 *I = Constant::getNullValue(NewIndexType);
2956 MadeChange = true;
2957 }
2958
2959 if (IndexTy != NewIndexType) {
2960 // If we are using a wider index than needed for this platform, shrink
2961 // it to what we need. If narrower, sign-extend it to what we need.
2962 // This explicit cast can make subsequent optimizations more obvious.
2963 *I = Builder.CreateIntCast(*I, NewIndexType, true);
2964 MadeChange = true;
2965 }
2966 }
2967 if (MadeChange)
2968 return &GEP;
2969
2970 // Canonicalize constant GEPs to i8 type.
2971 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
2972 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
2973 if (GEP.accumulateConstantOffset(DL, Offset))
2974 return replaceInstUsesWith(
2975 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), GEP.getName(),
2976 GEP.getNoWrapFlags()));
2977 }
2978
2979 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
2980 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
2981 Value *NewGEP =
2982 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
2983 return replaceInstUsesWith(GEP, NewGEP);
2984 }
2985
2986 // Check to see if the inputs to the PHI node are getelementptr instructions.
2987 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2988 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
2989 return replaceOperand(GEP, 0, NewPtrOp);
2990 }
2991
2992 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2993 if (Instruction *I = visitGEPOfGEP(GEP, Src))
2994 return I;
2995
2996 if (GEP.getNumIndices() == 1) {
2997 unsigned AS = GEP.getPointerAddressSpace();
2998 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2999 DL.getIndexSizeInBits(AS)) {
3000 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3001
3002 if (TyAllocSize == 1) {
3003 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3004 // but only if the result pointer is only used as if it were an integer,
3005 // or both point to the same underlying object (otherwise provenance is
3006 // not necessarily retained).
3007 Value *X = GEP.getPointerOperand();
3008 Value *Y;
3009 if (match(GEP.getOperand(1),
3010 m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3011 GEPType == Y->getType()) {
3012 bool HasSameUnderlyingObject =
3013 getUnderlyingObject(X) == getUnderlyingObject(Y);
3014 bool Changed = false;
3015 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3016 bool ShouldReplace = HasSameUnderlyingObject ||
3017 isa<ICmpInst>(U.getUser()) ||
3018 isa<PtrToIntInst>(U.getUser());
3019 Changed |= ShouldReplace;
3020 return ShouldReplace;
3021 });
3022 return Changed ? &GEP : nullptr;
3023 }
3024 } else if (auto *ExactIns =
3025 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3026 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
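// E.g. (i32 has an alloc size of 4 bytes):
//   %i = lshr exact i64 %v, 2
//   %g = getelementptr i32, ptr %x, i64 %i
// -->
//   %g = getelementptr i8, ptr %x, i64 %v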
3027 Value *V;
3028 if (ExactIns->isExact()) {
3029 if ((has_single_bit(TyAllocSize) &&
3030 match(GEP.getOperand(1),
3031 m_Shr(m_Value(V),
3032 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3033 match(GEP.getOperand(1),
3034 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3035 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3036 GEP.getPointerOperand(), V,
3037 GEP.getNoWrapFlags());
3038 }
3039 }
3040 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3041 // Try to canonicalize a non-i8 element type to i8 when the index is an
3042 // exact instruction (div/shr) with a constant RHS: we can then fold the
3043 // non-i8 element scale into the div/shr (similar to the mul case, just
3044 // inverted).
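// E.g. (i32 has an alloc size of 4 bytes):
//   %i = udiv exact i64 %v, 12
//   %g = getelementptr i32, ptr %x, i64 %i
// -->
//   %i2 = udiv exact i64 %v, 3
//   %g  = getelementptr i8, ptr %x, i64 %i2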
3045 const APInt *C;
3046 std::optional<APInt> NewC;
3047 if (has_single_bit(TyAllocSize) &&
3048 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3049 C->uge(countr_zero(TyAllocSize)))
3050 NewC = *C - countr_zero(TyAllocSize);
3051 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3052 APInt Quot;
3053 uint64_t Rem;
3054 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3055 if (Rem == 0)
3056 NewC = Quot;
3057 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3058 APInt Quot;
3059 int64_t Rem;
3060 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3061 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3062 if (!Quot.isAllOnes() && Rem == 0)
3063 NewC = Quot;
3064 }
3065
3066 if (NewC.has_value()) {
3067 Value *NewOp = Builder.CreateBinOp(
3068 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3069 ConstantInt::get(V->getType(), *NewC));
3070 cast<BinaryOperator>(NewOp)->setIsExact();
3071 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3072 GEP.getPointerOperand(), NewOp,
3073 GEP.getNoWrapFlags());
3074 }
3075 }
3076 }
3077 }
3078 }
3079 // We do not handle pointer-vector geps here.
3080 if (GEPType->isVectorTy())
3081 return nullptr;
3082
3083 if (GEP.getNumIndices() == 1) {
3084 // We can only preserve inbounds if the original gep is inbounds, the add
3085 // is nsw, and the add operands are non-negative.
3086 auto CanPreserveInBounds = [&](bool AddIsNSW, Value *Idx1, Value *Idx2) {
3087 SimplifyQuery Q = SQ.getWithInstruction(&GEP);
3088 return GEP.isInBounds() && AddIsNSW && isKnownNonNegative(Idx1, Q) &&
3089 isKnownNonNegative(Idx2, Q);
3090 };
3091
3092 // Try to replace ADD + GEP with GEP + GEP.
3093 Value *Idx1, *Idx2;
3094 if (match(GEP.getOperand(1),
3095 m_OneUse(m_Add(m_Value(Idx1), m_Value(Idx2))))) {
3096 // %idx = add i64 %idx1, %idx2
3097 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3098 // as:
3099 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3100 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3101 bool IsInBounds = CanPreserveInBounds(
3102 cast<OverflowingBinaryOperator>(GEP.getOperand(1))->hasNoSignedWrap(),
3103 Idx1, Idx2);
3104 auto *NewPtr =
3105 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3106 Idx1, "", IsInBounds);
3107 return replaceInstUsesWith(
3108 GEP, Builder.CreateGEP(GEP.getSourceElementType(), NewPtr, Idx2, "",
3109 IsInBounds));
3110 }
3111 ConstantInt *C;
3112 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAdd(
3113 m_Value(Idx1), m_ConstantInt(C))))))) {
3114 // %add = add nsw i32 %idx1, idx2
3115 // %sidx = sext i32 %add to i64
3116 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3117 // as:
3118 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3119 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3120 bool IsInBounds = CanPreserveInBounds(
3121 /*IsNSW=*/true, Idx1, C);
3122 auto *NewPtr = Builder.CreateGEP(
3123 GEP.getSourceElementType(), GEP.getPointerOperand(),
3124 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "",
3125 IsInBounds);
3126 return replaceInstUsesWith(
3127 GEP,
3128 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3129 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3130 "", IsInBounds));
3131 }
3132 }
3133
3134 if (!GEP.isInBounds()) {
3135 unsigned IdxWidth =
3136 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3137 APInt BasePtrOffset(IdxWidth, 0);
3138 Value *UnderlyingPtrOp =
3139 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
3140 BasePtrOffset);
3141 bool CanBeNull, CanBeFreed;
3142 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3143 DL, CanBeNull, CanBeFreed);
3144 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3145 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3146 BasePtrOffset.isNonNegative()) {
3147 APInt AllocSize(IdxWidth, DerefBytes);
3148 if (BasePtrOffset.ule(AllocSize)) {
3149 return GetElementPtrInst::CreateInBounds(
3150 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3151 }
3152 }
3153 }
3154 }
3155
3156 // nusw + nneg -> nuw
3157 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3158 all_of(GEP.indices(), [&](Value *Idx) {
3159 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3160 })) {
3161 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3162 return &GEP;
3163 }
3164
3165 if (Instruction *R = foldSelectGEP(GEP, Builder))
3166 return R;
3167
3168 return nullptr;
3169}
3170
3171static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3172 Instruction *AI) {
3173 if (isa<ConstantPointerNull>(V))
3174 return true;
3175 if (auto *LI = dyn_cast<LoadInst>(V))
3176 return isa<GlobalVariable>(LI->getPointerOperand());
3177 // Two distinct allocations will never be equal.
3178 return isAllocLikeFn(V, &TLI) && V != AI;
3179}
3180
3181/// Given a call CB which uses an address UsedV, return true if we can prove the
3182/// call's only possible effect is storing to UsedV.
3183static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3184 const TargetLibraryInfo &TLI) {
3185 if (!CB.use_empty())
3186 // TODO: add recursion if returned attribute is present
3187 return false;
3188
3189 if (CB.isTerminator())
3190 // TODO: remove implementation restriction
3191 return false;
3192
3193 if (!CB.willReturn() || !CB.doesNotThrow())
3194 return false;
3195
3196 // If the only possible side effect of the call is writing to the alloca,
3197 // and the result isn't used, we can safely remove any reads implied by the
3198 // call including those which might read the alloca itself.
3199 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3200 return Dest && Dest->Ptr == UsedV;
3201}
3202
3203static bool isAllocSiteRemovable(Instruction *AI,
3204 SmallVectorImpl<WeakTrackingVH> &Users,
3205 const TargetLibraryInfo &TLI) {
3206 SmallVector<Instruction *, 4> Worklist;
3207 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3208 Worklist.push_back(AI);
3209
3210 do {
3211 Instruction *PI = Worklist.pop_back_val();
3212 for (User *U : PI->users()) {
3213 Instruction *I = cast<Instruction>(U);
3214 switch (I->getOpcode()) {
3215 default:
3216 // Give up the moment we see something we can't handle.
3217 return false;
3218
3219 case Instruction::AddrSpaceCast:
3220 case Instruction::BitCast:
3221 case Instruction::GetElementPtr:
3222 Users.emplace_back(I);
3223 Worklist.push_back(I);
3224 continue;
3225
3226 case Instruction::ICmp: {
3227 ICmpInst *ICI = cast<ICmpInst>(I);
3228 // We can fold eq/ne comparisons with null to false/true, respectively.
3229 // We also fold comparisons in some conditions provided the alloc has
3230 // not escaped (see isNeverEqualToUnescapedAlloc).
3231 if (!ICI->isEquality())
3232 return false;
3233 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3234 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3235 return false;
3236
3237 // Do not fold compares to aligned_alloc calls, as they may have to
3238 // return null in case the required alignment cannot be satisfied,
3239 // unless we can prove that both alignment and size are valid.
3240 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3241 // Check if alignment and size of a call to aligned_alloc are valid,
3242 // that is, the alignment is a power-of-2 and the size is a multiple of the
3243 // alignment.
3244 const APInt *Alignment;
3245 const APInt *Size;
3246 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3247 match(CB->getArgOperand(1), m_APInt(Size)) &&
3248 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3249 };
3250 auto *CB = dyn_cast<CallBase>(AI);
3251 LibFunc TheLibFunc;
3252 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3253 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3254 !AlignmentAndSizeKnownValid(CB))
3255 return false;
3256 Users.emplace_back(I);
3257 continue;
3258 }
3259
3260 case Instruction::Call:
3261 // Ignore no-op and store intrinsics.
3262 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3263 switch (II->getIntrinsicID()) {
3264 default:
3265 return false;
3266
3267 case Intrinsic::memmove:
3268 case Intrinsic::memcpy:
3269 case Intrinsic::memset: {
3270 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3271 if (MI->isVolatile() || MI->getRawDest() != PI)
3272 return false;
3273 [[fallthrough]];
3274 }
3275 case Intrinsic::assume:
3276 case Intrinsic::invariant_start:
3277 case Intrinsic::invariant_end:
3278 case Intrinsic::lifetime_start:
3279 case Intrinsic::lifetime_end:
3280 case Intrinsic::objectsize:
3281 Users.emplace_back(I);
3282 continue;
3283 case Intrinsic::launder_invariant_group:
3284 case Intrinsic::strip_invariant_group:
3285 Users.emplace_back(I);
3286 Worklist.push_back(I);
3287 continue;
3288 }
3289 }
3290
3291 if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3292 Users.emplace_back(I);
3293 continue;
3294 }
3295
3296 if (getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3297 getAllocationFamily(I, &TLI) == Family) {
3298 assert(Family);
3299 Users.emplace_back(I);
3300 continue;
3301 }
3302
3303 if (getReallocatedOperand(cast<CallBase>(I)) == PI &&
3304 getAllocationFamily(I, &TLI) == Family) {
3305 assert(Family);
3306 Users.emplace_back(I);
3307 Worklist.push_back(I);
3308 continue;
3309 }
3310
3311 return false;
3312
3313 case Instruction::Store: {
3314 StoreInst *SI = cast<StoreInst>(I);
3315 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3316 return false;
3317 Users.emplace_back(I);
3318 continue;
3319 }
3320 }
3321 llvm_unreachable("missing a return?");
3322 }
3323 } while (!Worklist.empty());
3324 return true;
3325}
3326
3327Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3328 assert(isa<AllocaInst>(MI) || isRemovableAlloc(&cast<CallBase>(MI), &TLI));
3329
3330 // If we have a malloc call which is only used in comparisons to null and
3331 // free calls, delete the calls and replace the comparisons with true or
3332 // false as appropriate.
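// Illustrative example (annotation, not from the original source): for
//   %p = call ptr @malloc(i64 4)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// the malloc/free pair is deleted and %c folds to 'false', because the
// allocation we substitute is assumed never to return null.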
3333
3334 // This is based on the principle that we can substitute our own allocation
3335 // function (which will never return null) rather than knowledge of the
3336 // specific function being called. In some sense this can change the permitted
3337 // outputs of a program (when we convert a malloc to an alloca, the fact that
3338 // the allocation is now on the stack is potentially visible, for example),
3339 // but we believe it does so in a permissible manner.
3340 SmallVector<WeakTrackingVH, 64> Users;
3341
3342 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3343 // before each store.
3344 SmallVector<DbgVariableIntrinsic *, 8> DVIs;
3345 SmallVector<DbgVariableRecord *, 8> DVRs;
3346 std::unique_ptr<DIBuilder> DIB;
3347 if (isa<AllocaInst>(MI)) {
3348 findDbgUsers(DVIs, &MI, &DVRs);
3349 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3350 }
3351
3352 if (isAllocSiteRemovable(&MI, Users, TLI)) {
3353 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3354 // Lower all @llvm.objectsize calls first because they may
3355 // use a bitcast/GEP of the alloca we are removing.
3356 if (!Users[i])
3357 continue;
3358
3359 Instruction *I = cast<Instruction>(&*Users[i]);
3360
3361 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3362 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3363 SmallVector<Instruction *> InsertedInstructions;
3364 Value *Result = lowerObjectSizeCall(
3365 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3366 for (Instruction *Inserted : InsertedInstructions)
3367 Worklist.add(Inserted);
3368 replaceInstUsesWith(*I, Result);
3369 eraseInstFromFunction(*I);
3370 Users[i] = nullptr; // Skip examining in the next loop.
3371 }
3372 }
3373 }
3374 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
3375 if (!Users[i])
3376 continue;
3377
3378 Instruction *I = cast<Instruction>(&*Users[i]);
3379
3380 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3381 replaceInstUsesWith(*C,
3382 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3383 C->isFalseWhenEqual()));
3384 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3385 for (auto *DVI : DVIs)
3386 if (DVI->isAddressOfVariable())
3387 ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
3388 for (auto *DVR : DVRs)
3389 if (DVR->isAddressOfVariable())
3390 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3391 } else {
3392 // Casts, GEP, or anything else: we're about to delete this instruction,
3393 // so it can not have any valid uses.
3394 replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
3395 }
3396 eraseInstFromFunction(*I);
3397 }
3398
3399 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3400 // Replace invoke with a NOP intrinsic to maintain the original CFG
3401 Module *M = II->getModule();
3402 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3403 InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "",
3404 II->getParent());
3405 }
3406
3407 // Remove debug intrinsics which describe the value contained within the
3408 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3409 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3410 //
3411 // ```
3412 // define void @foo(i32 %0) {
3413 // %a = alloca i32 ; Deleted.
3414 // store i32 %0, i32* %a
3415 // dbg.value(i32 %0, "arg0") ; Not deleted.
3416 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3417 // call void @trivially_inlinable_no_op(i32* %a)
3418 // ret void
3419 // }
3420 // ```
3421 //
3422 // This may not be required if we stop describing the contents of allocas
3423 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3424 // the LowerDbgDeclare utility.
3425 //
3426 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3427 // "arg0" dbg.value may be stale after the call. However, failing to remove
3428 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3429 //
3430 // FIXME: the Assignment Tracking project has now likely made this
3431 // redundant (and it's sometimes harmful).
3432 for (auto *DVI : DVIs)
3433 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3434 DVI->eraseFromParent();
3435 for (auto *DVR : DVRs)
3436 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3437 DVR->eraseFromParent();
3438
3439 return eraseInstFromFunction(MI);
3440 }
3441 return nullptr;
3442}
3443
3444/// Move the call to free before a NULL test.
3445///
3446/// Check if this free is accessed after its argument has been tested
3447/// against NULL (property 0).
3448/// If yes, it is legal to move this call in its predecessor block.
3449///
3450/// The move is performed only if the block containing the call to free
3451/// will be removed, i.e.:
3452/// 1. it has only one predecessor P, and P has two successors
3453/// 2. it contains the call, noops, and an unconditional branch
3454/// 3. its successor is the same as its predecessor's successor
3455///
3456/// Profitability is not a concern here; this function should
3457/// be called only if the caller knows this transformation would be
3458/// profitable (e.g., for code size).
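/// Illustrative sketch (annotation, not from the original source): the C
/// pattern
///   if (p) free(p);
/// becomes an unconditional
///   free(p);
/// leaving an empty block that later passes such as SimplifyCFG can remove.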
3459static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3460 const DataLayout &DL) {
3461 Value *Op = FI.getArgOperand(0);
3462 BasicBlock *FreeInstrBB = FI.getParent();
3463 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3464
3465 // Validate part of constraint #1: Only one predecessor
3466 // FIXME: We can extend the number of predecessors, but in that case, we
3467 // would duplicate the call to free in each predecessor and it may
3468 // not be profitable even for code size.
3469 if (!PredBB)
3470 return nullptr;
3471
3472 // Validate constraint #2: Does this block contain only the call to
3473 // free, noops, and an unconditional branch?
3474 BasicBlock *SuccBB;
3475 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3476 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3477 return nullptr;
3478
3479 // If there are only 2 instructions in the block, at this point,
3480 // this is the call to free and an unconditional branch.
3481 // If there are more than 2 instructions, check that they are noops
3482 // i.e., they won't hurt the performance of the generated code.
3483 if (FreeInstrBB->size() != 2) {
3484 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3485 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3486 continue;
3487 auto *Cast = dyn_cast<CastInst>(&Inst);
3488 if (!Cast || !Cast->isNoopCast(DL))
3489 return nullptr;
3490 }
3491 }
3492 // Validate the rest of constraint #1 by matching on the pred branch.
3493 Instruction *TI = PredBB->getTerminator();
3494 BasicBlock *TrueBB, *FalseBB;
3495 CmpPredicate Pred;
3496 if (!match(TI, m_Br(m_ICmp(Pred,
3497 m_CombineOr(m_Specific(Op),
3498 m_Specific(Op->stripPointerCasts())),
3499 m_Zero()),
3500 TrueBB, FalseBB)))
3501 return nullptr;
3502 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3503 return nullptr;
3504
3505 // Validate constraint #3: Ensure the null case just falls through.
3506 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3507 return nullptr;
3508 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3509 "Broken CFG: missing edge from predecessor to successor");
3510
3511 // At this point, we know that everything in FreeInstrBB can be moved
3512 // before TI.
3513 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3514 if (&Instr == FreeInstrBBTerminator)
3515 break;
3516 Instr.moveBeforePreserving(TI);
3517 }
3518 assert(FreeInstrBB->size() == 1 &&
3519 "Only the branch instruction should remain");
3520
3521 // Now that we've moved the call to free before the NULL check, we have to
3522 // remove any attributes on its parameter that imply it's non-null, because
3523 // those attributes might have only been valid because of the NULL check, and
3524 // we can get miscompiles if we keep them. This is conservative if non-null is
3525 // also implied by something other than the NULL check, but it's guaranteed to
3526 // be correct, and the conservativeness won't matter in practice, since the
3527 // attributes are irrelevant for the call to free itself and the pointer
3528 // shouldn't be used after the call.
3529 AttributeList Attrs = FI.getAttributes();
3530 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3531 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3532 if (Dereferenceable.isValid()) {
3533 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3534 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3535 Attribute::Dereferenceable);
3536 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3537 }
3538 FI.setAttributes(Attrs);
3539
3540 return &FI;
3541}
3542
3543Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3544 // free undef -> unreachable.
3545 if (isa<UndefValue>(Op)) {
3546 // Leave a marker since we can't modify the CFG here.
3547 CreateNonTerminatorUnreachable(&FI);
3548 return eraseInstFromFunction(FI);
3549 }
3550
3551 // If we have 'free null' delete the instruction. This can happen in stl code
3552 // when lots of inlining happens.
3553 if (isa<ConstantPointerNull>(Op))
3554 return eraseInstFromFunction(FI);
3555
3556 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3557 // realloc() entirely.
3558 CallInst *CI = dyn_cast<CallInst>(Op);
3559 if (CI && CI->hasOneUse())
3560 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3561 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3562
3563 // If we optimize for code size, try to move the call to free before the null
3564 // test so that simplify cfg can remove the empty block and dead code
3565 // eliminate the branch. I.e., it helps to turn something like:
3566 // if (foo) free(foo);
3567 // into
3568 // free(foo);
3569 //
3570 // Note that we can only do this for 'free' and not for any flavor of
3571 // 'operator delete'; there is no 'operator delete' symbol for which we are
3572 // permitted to invent a call, even if we're passing in a null pointer.
3573 if (MinimizeSize) {
3574 LibFunc Func;
3575 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3576 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
3577 return I;
3578 }
3579
3580 return nullptr;
3581}
3582
3583Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3584 Value *RetVal = RI.getReturnValue();
3585 if (!RetVal || !AttributeFuncs::isNoFPClassCompatibleType(RetVal->getType()))
3586 return nullptr;
3587
3588 Function *F = RI.getFunction();
3589 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
3590 if (ReturnClass == fcNone)
3591 return nullptr;
3592
3593 KnownFPClass KnownClass;
3594 Value *Simplified =
3595 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, 0, &RI);
3596 if (!Simplified)
3597 return nullptr;
3598
3599 return ReturnInst::Create(RI.getContext(), Simplified);
3600}
3601
3602// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3603bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
3604 // Try to remove the previous instruction if it must lead to unreachable.
3605 // This includes instructions like stores and "llvm.assume" that may not get
3606 // removed by simple dead code elimination.
3607 bool Changed = false;
3608 while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3609 // While we theoretically can erase EH, that would result in a block that
3610 // used to start with an EH no longer starting with EH, which is invalid.
3611 // To make it valid, we'd need to fixup predecessors to no longer refer to
3612 // this block, but that changes CFG, which is not allowed in InstCombine.
3613 if (Prev->isEHPad())
3614 break; // Can not drop any more instructions. We're done here.
3615
3616 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
3617 break; // Can not drop any more instructions. We're done here.
3618 // Otherwise, this instruction can be freely erased,
3619 // even if it is not side-effect free.
3620
3621 // A value may still have uses before we process it here (for example, in
3622 // another unreachable block), so convert those to poison.
3623 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3624 eraseInstFromFunction(*Prev);
3625 Changed = true;
3626 }
3627 return Changed;
3628}
3629
3630Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
3631 removeInstructionsBeforeUnreachable(I);
3632 return nullptr;
3633}
3634
3635Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
3636 assert(BI.isUnconditional() && "Only for unconditional branches.");
3637
3638 // If this store is the second-to-last instruction in the basic block
3639 // (excluding debug info and bitcasts of pointers) and if the block ends with
3640 // an unconditional branch, try to move the store to the successor block.
3641
3642 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3643 auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3644 return BBI->isDebugOrPseudoInst() ||
3645 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3646 };
3647
3648 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3649 do {
3650 if (BBI != FirstInstr)
3651 --BBI;
3652 } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3653
3654 return dyn_cast<StoreInst>(BBI);
3655 };
3656
3657 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3658 if (mergeStoreIntoSuccessor(*SI))
3659 return &BI;
3660
3661 return nullptr;
3662}
3663
3664void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
3665 SmallVectorImpl<BasicBlock *> &Worklist) {
3666 if (!DeadEdges.insert({From, To}).second)
3667 return;
3668
3669 // Replace phi node operands in successor with poison.
3670 for (PHINode &PN : To->phis())
3671 for (Use &U : PN.incoming_values())
3672 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
3673 replaceUse(U, PoisonValue::get(PN.getType()));
3674 addToWorklist(&PN);
3675 MadeIRChange = true;
3676 }
3677
3678 Worklist.push_back(To);
3679}
3680
3681// Under the assumption that I is unreachable, remove it and following
3682// instructions. Changes are reported directly to MadeIRChange.
3683void InstCombinerImpl::handleUnreachableFrom(
3684 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
3685 BasicBlock *BB = I->getParent();
3686 for (Instruction &Inst : make_early_inc_range(
3687 make_range(std::next(BB->getTerminator()->getReverseIterator()),
3688 std::next(I->getReverseIterator())))) {
3689 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3690 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
3691 MadeIRChange = true;
3692 }
3693 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3694 continue;
3695 // RemoveDIs: erase debug-info on this instruction manually.
3696 Inst.dropDbgRecords();
3697 Inst.eraseFromParent();
3698 MadeIRChange = true;
3699 }
3700
3701 SmallVector<Value *> Changed;
3702 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
3703 MadeIRChange = true;
3704 for (Value *V : Changed)
3705 addToWorklist(cast<Instruction>(V));
3706 }
3707
3708 // Handle potentially dead successors.
3709 for (BasicBlock *Succ : successors(BB))
3710 addDeadEdge(BB, Succ, Worklist);
3711}
3712
3713void InstCombinerImpl::handlePotentiallyDeadBlocks(
3714 SmallVectorImpl<BasicBlock *> &Worklist) {
3715 while (!Worklist.empty()) {
3716 BasicBlock *BB = Worklist.pop_back_val();
3717 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
3718 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
3719 }))
3720 continue;
3721
3722 handleUnreachableFrom(&BB->front(), Worklist);
3723 }
3724}
3725
3726void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
3727 BasicBlock *LiveSucc) {
3728 SmallVector<BasicBlock *> Worklist;
3729 for (BasicBlock *Succ : successors(BB)) {
3730 // The live successor isn't dead.
3731 if (Succ == LiveSucc)
3732 continue;
3733
3734 addDeadEdge(BB, Succ, Worklist);
3735 }
3736
3737 handlePotentiallyDeadBlocks(Worklist);
3738}
3739
3740Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
3741 if (BI.isUnconditional())
3742 return visitUnconditionalBranchInst(BI);
3743
3744 // Change br (not X), label True, label False to: br X, label False, True
3745 Value *Cond = BI.getCondition();
3746 Value *X;
3747 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
3748 // Swap Destinations and condition...
3749 BI.swapSuccessors();
3750 if (BPI)
3751 BPI->swapSuccEdgesProbabilities(BI.getParent());
3752 return replaceOperand(BI, 0, X);
3753 }
3754
3755 // Canonicalize logical-and-with-invert as logical-or-with-invert.
3756 // This is done by inverting the condition and swapping successors:
3757 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
3758 Value *Y;
3759 if (isa<SelectInst>(Cond) &&
3760 match(Cond,
3761 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
3762 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
3763 Value *Or = Builder.CreateLogicalOr(NotX, Y);
3764 BI.swapSuccessors();
3765 if (BPI)
3766 BPI->swapSuccEdgesProbabilities(BI.getParent());
3767 return replaceOperand(BI, 0, Or);
3768 }
3769
3770 // If the condition is irrelevant, remove the use so that other
3771 // transforms on the condition become more effective.
3772 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
3773 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
3774
3775 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3776 CmpPredicate Pred;
3777 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
3778 !isCanonicalPredicate(Pred)) {
3779 // Swap destinations and condition.
3780 auto *Cmp = cast<CmpInst>(Cond);
3781 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
3782 BI.swapSuccessors();
3783 if (BPI)
3784 BPI->swapSuccEdgesProbabilities(BI.getParent());
3785 Worklist.push(Cmp);
3786 return &BI;
3787 }
3788
3789 if (isa<UndefValue>(Cond)) {
3790 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
3791 return nullptr;
3792 }
3793 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3794 handlePotentiallyDeadSuccessors(BI.getParent(),
3795 BI.getSuccessor(!CI->getZExtValue()));
3796 return nullptr;
3797 }
3798
3799 // Replace all dominated uses of the condition with true/false
3800 // Ignore constant expressions to avoid iterating over uses on other
3801 // functions.
3802 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
3803 for (auto &U : make_early_inc_range(Cond->uses())) {
3804 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
3805 if (DT.dominates(Edge0, U)) {
3806 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
3807 addToWorklist(cast<Instruction>(U.getUser()));
3808 continue;
3809 }
3810 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
3811 if (DT.dominates(Edge1, U)) {
3812 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
3813 addToWorklist(cast<Instruction>(U.getUser()));
3814 }
3815 }
3816 }
3817
3818 DC.registerBranch(&BI);
3819 return nullptr;
3820}
3821
3822// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
3823// we can prove that both (switch C) and (switch X) go to the default when cond
3824// is false/true.
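// Illustrative example (annotation, not from the original source): given
//   %cond = icmp ult i32 %x, 4
//   %sel  = select i1 %cond, i32 %x, i32 7
//   switch i32 %sel, ...        ; cases 0..3, case 7 absent
// case value 7 maps to the default destination, and whenever %cond is false
// %x is >= 4 so it cannot hit cases 0..3 either; the switch may therefore
// use %x directly.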
3825static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
3826 SelectInst *Select,
3827 bool IsTrueArm) {
3828 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3829 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
3830 if (!C)
3831 return nullptr;
3832
3833 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
3834 if (CstBB != SI.getDefaultDest())
3835 return nullptr;
3836 Value *X = Select->getOperand(3 - CstOpIdx);
3837 CmpPredicate Pred;
3838 const APInt *RHSC;
3839 if (!match(Select->getCondition(),
3840 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
3841 return nullptr;
3842 if (IsTrueArm)
3843 Pred = ICmpInst::getInversePredicate(Pred);
3844
3845 // See whether we can replace the select with X
3846 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
3847 for (auto Case : SI.cases())
3848 if (!CR.contains(Case.getCaseValue()->getValue()))
3849 return nullptr;
3850
3851 return X;
3852}
3853
3854Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
3855 Value *Cond = SI.getCondition();
3856 Value *Op0;
3857 ConstantInt *AddRHS;
3858 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3859 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3860 for (auto Case : SI.cases()) {
3861 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3862 assert(isa<ConstantInt>(NewCase) &&
3863 "Result of expression should be constant");
3864 Case.setValue(cast<ConstantInt>(NewCase));
3865 }
3866 return replaceOperand(SI, 0, Op0);
3867 }
3868
3869 ConstantInt *SubLHS;
3870 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
3871 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
3872 for (auto Case : SI.cases()) {
3873 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
3874 assert(isa<ConstantInt>(NewCase) &&
3875 "Result of expression should be constant");
3876 Case.setValue(cast<ConstantInt>(NewCase));
3877 }
3878 return replaceOperand(SI, 0, Op0);
3879 }
3880
3881 uint64_t ShiftAmt;
3882 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
3883 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
3884 all_of(SI.cases(), [&](const auto &Case) {
3885 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3886 })) {
3887 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
3888 OverflowingBinaryOperator *Shl = cast<OverflowingBinaryOperator>(Cond);
3889 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
3890 Shl->hasOneUse()) {
3891 Value *NewCond = Op0;
3892 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
3893 // If the shift may wrap, we need to mask off the shifted bits.
3894 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
3895 NewCond = Builder.CreateAnd(
3896 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
3897 }
3898 for (auto Case : SI.cases()) {
3899 const APInt &CaseVal = Case.getCaseValue()->getValue();
3900 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
3901 : CaseVal.lshr(ShiftAmt);
3902 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3903 }
3904 return replaceOperand(SI, 0, NewCond);
3905 }
3906 }
3907
3908 // Fold switch(zext/sext(X)) into switch(X) if possible.
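// Illustrative example (annotation, not from the original source):
//   %wide = zext i32 %x to i64
//   switch i64 %wide, ...
// becomes 'switch i32 %x' with every case value truncated, which is only
// done after checking that each case value fits in the narrower type.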
3909 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
3910 bool IsZExt = isa<ZExtInst>(Cond);
3911 Type *SrcTy = Op0->getType();
3912 unsigned NewWidth = SrcTy->getScalarSizeInBits();
3913
3914 if (all_of(SI.cases(), [&](const auto &Case) {
3915 const APInt &CaseVal = Case.getCaseValue()->getValue();
3916 return IsZExt ? CaseVal.isIntN(NewWidth)
3917 : CaseVal.isSignedIntN(NewWidth);
3918 })) {
3919 for (auto &Case : SI.cases()) {
3920 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3921 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3922 }
3923 return replaceOperand(SI, 0, Op0);
3924 }
3925 }
3926
3927 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
3928 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
3929 if (Value *V =
3930 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
3931 return replaceOperand(SI, 0, V);
3932 if (Value *V =
3933 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
3934 return replaceOperand(SI, 0, V);
3935 }
3936
3937 KnownBits Known = computeKnownBits(Cond, 0, &SI);
3938 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3939 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3940
3941 // Compute the number of leading bits we can ignore.
3942 // TODO: A better way to determine this would use ComputeNumSignBits().
3943 for (const auto &C : SI.cases()) {
3944 LeadingKnownZeros =
3945 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
3946 LeadingKnownOnes =
3947 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
3948 }
3949
3950 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3951
3952 // Shrink the condition operand if the new type is smaller than the old type.
3953 // But do not shrink to a non-standard type, because backend can't generate
3954 // good code for that yet.
3955 // TODO: We can make it aggressive again after fixing PR39569.
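// Illustrative example (annotation, not from the original source): if the
// known leading bits show that an i64 condition and all case values fit in
// 32 bits, the switch is rewritten to use 'trunc i64 %cond to i32', provided
// shouldChangeType accepts i32 as a desirable type for the target.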
3956 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3957 shouldChangeType(Known.getBitWidth(), NewWidth)) {
3958 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3959 Builder.SetInsertPoint(&SI);
3960 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3961
3962 for (auto Case : SI.cases()) {
3963 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3964 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3965 }
3966 return replaceOperand(SI, 0, NewCond);
3967 }
3968
3969 if (isa<UndefValue>(Cond)) {
3970 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
3971 return nullptr;
3972 }
3973 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
3974 handlePotentiallyDeadSuccessors(SI.getParent(),
3975 SI.findCaseValue(CI)->getCaseSuccessor());
3976 return nullptr;
3977 }
3978
3979 return nullptr;
3980}
3981
3982Instruction *
3983InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
3984 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
3985 if (!WO)
3986 return nullptr;
3987
3988 Intrinsic::ID OvID = WO->getIntrinsicID();
3989 const APInt *C = nullptr;
3990 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
3991 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3992 OvID == Intrinsic::umul_with_overflow)) {
3993 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
3994 if (C->isAllOnes())
3995 return BinaryOperator::CreateNeg(WO->getLHS());
3996 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
3997 if (C->isPowerOf2()) {
3998 return BinaryOperator::CreateShl(
3999 WO->getLHS(),
4000 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4001 }
4002 }
4003 }
4004
4005 // We're extracting from an overflow intrinsic. See if we're the only user.
4006 // That allows us to simplify multiple result intrinsics to simpler things
4007 // that just get one value.
4008 if (!WO->hasOneUse())
4009 return nullptr;
4010
4011 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4012 // and replace it with a traditional binary instruction.
4013 if (*EV.idx_begin() == 0) {
4014 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4015 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4016 // Replace the old instruction's uses with poison.
4017 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4018 eraseInstFromFunction(*WO);
4019 return BinaryOperator::Create(BinOp, LHS, RHS);
4020 }
4021
4022 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4023
4024 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4025 if (OvID == Intrinsic::usub_with_overflow)
4026 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4027
4028 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4029 // +1 is not possible because we assume signed values.
4030 if (OvID == Intrinsic::smul_with_overflow &&
4031 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4032 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4033
4034 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4035 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4036 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4037 // Only handle even bitwidths for performance reasons.
4038 if (BitWidth % 2 == 0)
4039 return new ICmpInst(
4040 ICmpInst::ICMP_UGT, WO->getLHS(),
4041 ConstantInt::get(WO->getLHS()->getType(),
4042 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4043 }
4044
4045 // If only the overflow result is used, and the right hand side is a
4046 // constant (or constant splat), we can remove the intrinsic by directly
4047 // checking for overflow.
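// Illustrative example (annotation, not from the original source): if only
// the overflow bit of 'llvm.sadd.with.overflow.i32(%x, 1)' is used, it can
// be rewritten as 'icmp eq i32 %x, 2147483647', since the add overflows
// exactly when %x is INT32_MAX.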
4048 if (C) {
4049 // Compute the no-wrap range for LHS given RHS=C, then construct an
4050 // equivalent icmp, potentially using an offset.
4052 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4053
4054 CmpInst::Predicate Pred;
4055 APInt NewRHSC, Offset;
4056 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4057 auto *OpTy = WO->getRHS()->getType();
4058 auto *NewLHS = WO->getLHS();
4059 if (Offset != 0)
4060 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4061 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4062 ConstantInt::get(OpTy, NewRHSC));
4063 }
4064
4065 return nullptr;
4066}
4067
4068Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4069 Value *Agg = EV.getAggregateOperand();
4070
4071 if (!EV.hasIndices())
4072 return replaceInstUsesWith(EV, Agg);
4073
4074 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4075 SQ.getWithInstruction(&EV)))
4076 return replaceInstUsesWith(EV, V);
4077
4078 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4079 // We're extracting from an insertvalue instruction, compare the indices
4080 const unsigned *exti, *exte, *insi, *inse;
4081 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4082 exte = EV.idx_end(), inse = IV->idx_end();
4083 exti != exte && insi != inse;
4084 ++exti, ++insi) {
4085 if (*insi != *exti)
4086 // The insert and extract both reference distinctly different elements.
4087 // This means the extract is not influenced by the insert, and we can
4088 // replace the aggregate operand of the extract with the aggregate
4089 // operand of the insert. i.e., replace
4090 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4091 // %E = extractvalue { i32, { i32 } } %I, 0
4092 // with
4093 // %E = extractvalue { i32, { i32 } } %A, 0
4094 return ExtractValueInst::Create(IV->getAggregateOperand(),
4095 EV.getIndices());
4096 }
4097 if (exti == exte && insi == inse)
4098 // Both iterators are at the end: Index lists are identical. Replace
4099 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4100 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4101 // with "i32 42"
4102 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4103 if (exti == exte) {
4104 // The extract list is a prefix of the insert list. i.e. replace
4105 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4106 // %E = extractvalue { i32, { i32 } } %I, 1
4107 // with
4108 // %X = extractvalue { i32, { i32 } } %A, 1
4109 // %E = insertvalue { i32 } %X, i32 42, 0
4110 // by switching the order of the insert and extract (though the
4111 // insertvalue should be left in, since it may have other uses).
4112 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4113 EV.getIndices());
4114 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4115 ArrayRef(insi, inse));
4116 }
4117 if (insi == inse)
4118 // The insert list is a prefix of the extract list
4119 // We can simply remove the common indices from the extract and make it
4120 // operate on the inserted value instead of the insertvalue result.
4121 // i.e., replace
4122 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4123 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4124 // with
4125 // %E extractvalue { i32 } { i32 42 }, 0
4126 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4127 ArrayRef(exti, exte));
4128 }
4129
4130 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4131 return R;
4132
4133 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4134 // Bail out if the aggregate contains scalable vector type
4135 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4136 STy && STy->isScalableTy())
4137 return nullptr;
4138
4139 // If the (non-volatile) load only has one use, we can rewrite this to a
4140 // load from a GEP. This reduces the size of the load. If a load is used
4141 // only by extractvalue instructions then this either must have been
4142 // optimized before, or it is a struct with padding, in which case we
4143 // don't want to do the transformation as it loses padding knowledge.
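// Illustrative example (annotation, not from the original source):
//   %agg = load { i32, i32 }, ptr %p
//   %v   = extractvalue { i32, i32 } %agg, 1
// becomes an i32 load through 'getelementptr inbounds ({ i32, i32 }, ptr %p,
// i32 0, i32 1)' when the original load is simple and has a single use.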
4144 if (L->isSimple() && L->hasOneUse()) {
4145 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4146 SmallVector<Value*, 4> Indices;
4147 // Prefix an i32 0 since we need the first element.
4148 Indices.push_back(Builder.getInt32(0));
4149 for (unsigned Idx : EV.indices())
4150 Indices.push_back(Builder.getInt32(Idx));
4151
4152 // We need to insert these at the location of the old load, not at that of
4153 // the extractvalue.
4154 Builder.SetInsertPoint(L);
4155 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4156 L->getPointerOperand(), Indices);
4157 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4158 // Whatever aliasing information we had for the original load must also
4159 // hold for the smaller load, so propagate the annotations.
4160 NL->setAAMetadata(L->getAAMetadata());
4161 // Returning the load directly will cause the main loop to insert it in
4162 // the wrong spot, so use replaceInstUsesWith().
4163 return replaceInstUsesWith(EV, NL);
4164 }
4165 }
4166
4167 if (auto *PN = dyn_cast<PHINode>(Agg))
4168 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4169 return Res;
4170
4171 // Canonicalize extract (select Cond, TV, FV)
4172 // -> select cond, (extract TV), (extract FV)
4173 if (auto *SI = dyn_cast<SelectInst>(Agg))
4174 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4175 return R;
4176
4177 // We could simplify extracts from other values. Note that nested extracts may
4178 // already be simplified implicitly by the above: extract (extract (insert) )
4179 // will be translated into extract ( insert ( extract ) ) first and then just
4180 // the value inserted, if appropriate. Similarly for extracts from single-use
4181 // loads: extract (extract (load)) will be translated to extract (load (gep))
4182 // and if again single-use then via load (gep (gep)) to load (gep).
4183 // However, double extracts from e.g. function arguments or return values
4184 // aren't handled yet.
4185 return nullptr;
4186}
4187
4188/// Return 'true' if the given typeinfo will match anything.
4189static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4190 switch (Personality) {
4191 case EHPersonality::GNU_C:
4192 case EHPersonality::GNU_C_SjLj:
4193 case EHPersonality::Rust:
4194 // The GCC C EH and Rust personalities only exist to support cleanups, so
4195 // it's not clear what the semantics of catch clauses are.
4196 return false;
4197 case EHPersonality::Unknown:
4198 return false;
4199 case EHPersonality::GNU_Ada:
4200 // While __gnat_all_others_value will match any Ada exception, it doesn't
4201 // match foreign exceptions (or didn't, before gcc-4.7).
4202 return false;
4203 case EHPersonality::GNU_CXX:
4204 case EHPersonality::GNU_CXX_SjLj:
4205 case EHPersonality::GNU_ObjC:
4206 case EHPersonality::MSVC_X86SEH:
4207 case EHPersonality::MSVC_TableSEH:
4208 case EHPersonality::MSVC_CXX:
4209 case EHPersonality::CoreCLR:
4210 case EHPersonality::Wasm_CXX:
4211 case EHPersonality::XL_CXX:
4212 case EHPersonality::ZOS_CXX:
4213 return TypeInfo->isNullValue();
4214 }
4215 llvm_unreachable("invalid enum");
4216}
4217
4218static bool shorter_filter(const Value *LHS, const Value *RHS) {
4219 return
4220 cast<ArrayType>(LHS->getType())->getNumElements()
4221 <
4222 cast<ArrayType>(RHS->getType())->getNumElements();
4223}
4224
4225Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4226 // The logic here should be correct for any real-world personality function.
4227 // However if that turns out not to be true, the offending logic can always
4228 // be conditioned on the personality function, like the catch-all logic is.
4229 EHPersonality Personality =
4230 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4231
4232 // Simplify the list of clauses, eg by removing repeated catch clauses
4233 // (these are often created by inlining).
4234 bool MakeNewInstruction = false; // If true, recreate using the following:
4235 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4236 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4237
4238 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4239 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4240 bool isLastClause = i + 1 == e;
4241 if (LI.isCatch(i)) {
4242 // A catch clause.
4243 Constant *CatchClause = LI.getClause(i);
4244 Constant *TypeInfo = CatchClause->stripPointerCasts();
4245
4246 // If we already saw this clause, there is no point in having a second
4247 // copy of it.
4248 if (AlreadyCaught.insert(TypeInfo).second) {
4249 // This catch clause was not already seen.
4250 NewClauses.push_back(CatchClause);
4251 } else {
4252 // Repeated catch clause - drop the redundant copy.
4253 MakeNewInstruction = true;
4254 }
4255
4256 // If this is a catch-all then there is no point in keeping any following
4257 // clauses or marking the landingpad as having a cleanup.
4258 if (isCatchAll(Personality, TypeInfo)) {
4259 if (!isLastClause)
4260 MakeNewInstruction = true;
4261 CleanupFlag = false;
4262 break;
4263 }
4264 } else {
4265 // A filter clause. If any of the filter elements were already caught
4266 // then they can be dropped from the filter. It is tempting to try to
4267 // exploit the filter further by saying that any typeinfo that does not
4268 // occur in the filter can't be caught later (and thus can be dropped).
4269 // However this would be wrong, since typeinfos can match without being
4270 // equal (for example if one represents a C++ class, and the other some
4271 // class derived from it).
4272 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4273 Constant *FilterClause = LI.getClause(i);
4274 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4275 unsigned NumTypeInfos = FilterType->getNumElements();
4276
4277 // An empty filter catches everything, so there is no point in keeping any
4278 // following clauses or marking the landingpad as having a cleanup. By
4279 // dealing with this case here the following code is made a bit simpler.
4280 if (!NumTypeInfos) {
4281 NewClauses.push_back(FilterClause);
4282 if (!isLastClause)
4283 MakeNewInstruction = true;
4284 CleanupFlag = false;
4285 break;
4286 }
4287
4288 bool MakeNewFilter = false; // If true, make a new filter.
4289 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4290 if (isa<ConstantAggregateZero>(FilterClause)) {
4291 // Not an empty filter - it contains at least one null typeinfo.
4292 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4293 Constant *TypeInfo =
4294 Constant::getNullValue(FilterType->getElementType());
4295 // If this typeinfo is a catch-all then the filter can never match.
4296 if (isCatchAll(Personality, TypeInfo)) {
4297 // Throw the filter away.
4298 MakeNewInstruction = true;
4299 continue;
4300 }
4301
4302 // There is no point in having multiple copies of this typeinfo, so
4303 // discard all but the first copy if there is more than one.
4304 NewFilterElts.push_back(TypeInfo);
4305 if (NumTypeInfos > 1)
4306 MakeNewFilter = true;
4307 } else {
4308 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4309 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4310 NewFilterElts.reserve(NumTypeInfos);
4311
4312 // Remove any filter elements that were already caught or that already
4313 // occurred in the filter. While there, see if any of the elements are
4314 // catch-alls. If so, the filter can be discarded.
4315 bool SawCatchAll = false;
4316 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4317 Constant *Elt = Filter->getOperand(j);
4318 Constant *TypeInfo = Elt->stripPointerCasts();
4319 if (isCatchAll(Personality, TypeInfo)) {
4320 // This element is a catch-all. Bail out, noting this fact.
4321 SawCatchAll = true;
4322 break;
4323 }
4324
4325 // Even if we've seen a type in a catch clause, we don't want to
4326 // remove it from the filter. An unexpected type handler may be
4327 // set up for a call site which throws an exception of the same
4328 // type caught. In order for the exception thrown by the unexpected
4329 // handler to propagate correctly, the filter must be correctly
4330 // described for the call site.
4331 //
4332 // Example:
4333 //
4334 // void unexpected() { throw 1;}
4335 // void foo() throw (int) {
4336 // std::set_unexpected(unexpected);
4337 // try {
4338 // throw 2.0;
4339 // } catch (int i) {}
4340 // }
4341
4342 // There is no point in having multiple copies of the same typeinfo in
4343 // a filter, so only add it if we didn't already.
4344 if (SeenInFilter.insert(TypeInfo).second)
4345 NewFilterElts.push_back(cast<Constant>(Elt));
4346 }
4347 // A filter containing a catch-all cannot match anything by definition.
4348 if (SawCatchAll) {
4349 // Throw the filter away.
4350 MakeNewInstruction = true;
4351 continue;
4352 }
4353
4354 // If we dropped something from the filter, make a new one.
4355 if (NewFilterElts.size() < NumTypeInfos)
4356 MakeNewFilter = true;
4357 }
4358 if (MakeNewFilter) {
4359 FilterType = ArrayType::get(FilterType->getElementType(),
4360 NewFilterElts.size());
4361 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4362 MakeNewInstruction = true;
4363 }
4364
4365 NewClauses.push_back(FilterClause);
4366
4367 // If the new filter is empty then it will catch everything so there is
4368 // no point in keeping any following clauses or marking the landingpad
4369 // as having a cleanup. The case of the original filter being empty was
4370 // already handled above.
4371 if (MakeNewFilter && !NewFilterElts.size()) {
4372 assert(MakeNewInstruction && "New filter but not a new instruction!");
4373 CleanupFlag = false;
4374 break;
4375 }
4376 }
4377 }
4378
4379 // If several filters occur in a row then reorder them so that the shortest
4380 // filters come first (those with the smallest number of elements). This is
4381 // advantageous because shorter filters are more likely to match, speeding up
4382 // unwinding, but mostly because it increases the effectiveness of the other
4383 // filter optimizations below.
4384 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4385 unsigned j;
4386 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4387 for (j = i; j != e; ++j)
4388 if (!isa<ArrayType>(NewClauses[j]->getType()))
4389 break;
4390
4391 // Check whether the filters are already sorted by length. We need to know
4392 // if sorting them is actually going to do anything so that we only make a
4393 // new landingpad instruction if it does.
4394 for (unsigned k = i; k + 1 < j; ++k)
4395 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4396 // Not sorted, so sort the filters now. Doing an unstable sort would be
4397 // correct too but reordering filters pointlessly might confuse users.
4398 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4399 shorter_filter);
4400 MakeNewInstruction = true;
4401 break;
4402 }
4403
4404 // Look for the next batch of filters.
4405 i = j + 1;
4406 }
4407
4408 // If typeinfos matched if and only if equal, then the elements of a filter L
4409 // that occurs later than a filter F could be replaced by the intersection of
4410 // the elements of F and L. In reality two typeinfos can match without being
4411 // equal (for example if one represents a C++ class, and the other some class
4412 // derived from it) so it would be wrong to perform this transform in general.
4413 // However the transform is correct and useful if F is a subset of L. In that
4414 // case L can be replaced by F, and thus removed altogether since repeating a
4415 // filter is pointless. So here we look at all pairs of filters F and L where
4416 // L follows F in the list of clauses, and remove L if every element of F is
4417 // an element of L. This can occur when inlining C++ functions with exception
4418 // specifications.
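// Illustrative example (annotation, not from the original source): given a
// clause 'filter [1 x ptr] [ptr @_ZTIi]' followed later by
// 'filter [2 x ptr] [ptr @_ZTIi, ptr @_ZTId]', the first filter is a subset
// of the second, so the second (longer) filter is redundant and is removed.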
4419 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4420 // Examine each filter in turn.
4421 Value *Filter = NewClauses[i];
4422 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4423 if (!FTy)
4424 // Not a filter - skip it.
4425 continue;
4426 unsigned FElts = FTy->getNumElements();
4427 // Examine each filter following this one. Doing this backwards means that
4428 // we don't have to worry about filters disappearing under us when removed.
4429 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4430 Value *LFilter = NewClauses[j];
4431 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4432 if (!LTy)
4433 // Not a filter - skip it.
4434 continue;
4435 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4436 // an element of LFilter, then discard LFilter.
4437 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4438 // If Filter is empty then it is a subset of LFilter.
4439 if (!FElts) {
4440 // Discard LFilter.
4441 NewClauses.erase(J);
4442 MakeNewInstruction = true;
4443 // Move on to the next filter.
4444 continue;
4445 }
4446 unsigned LElts = LTy->getNumElements();
4447 // If Filter is longer than LFilter then it cannot be a subset of it.
4448 if (FElts > LElts)
4449 // Move on to the next filter.
4450 continue;
4451 // At this point we know that LFilter has at least one element.
4452 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4453 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4454 // already know that Filter is not longer than LFilter).
4455 if (isa<ConstantAggregateZero>(Filter)) {
4456 assert(FElts <= LElts && "Should have handled this case earlier!");
4457 // Discard LFilter.
4458 NewClauses.erase(J);
4459 MakeNewInstruction = true;
4460 }
4461 // Move on to the next filter.
4462 continue;
4463 }
4464 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4465 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4466 // Since Filter is non-empty and contains only zeros, it is a subset of
4467 // LFilter iff LFilter contains a zero.
4468 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4469 for (unsigned l = 0; l != LElts; ++l)
4470 if (LArray->getOperand(l)->isNullValue()) {
4471 // LFilter contains a zero - discard it.
4472 NewClauses.erase(J);
4473 MakeNewInstruction = true;
4474 break;
4475 }
4476 // Move on to the next filter.
4477 continue;
4478 }
4479 // At this point we know that both filters are ConstantArrays. Loop over
4480 // operands to see whether every element of Filter is also an element of
4481 // LFilter. Since filters tend to be short this is probably faster than
4482 // using a method that scales nicely.
4483 ConstantArray *FArray = cast<ConstantArray>(Filter);
4484 bool AllFound = true;
4485 for (unsigned f = 0; f != FElts; ++f) {
4486 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4487 AllFound = false;
4488 for (unsigned l = 0; l != LElts; ++l) {
4489 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4490 if (LTypeInfo == FTypeInfo) {
4491 AllFound = true;
4492 break;
4493 }
4494 }
4495 if (!AllFound)
4496 break;
4497 }
4498 if (AllFound) {
4499 // Discard LFilter.
4500 NewClauses.erase(J);
4501 MakeNewInstruction = true;
4502 }
4503 // Move on to the next filter.
4504 }
4505 }
4506
4507 // If we changed any of the clauses, replace the old landingpad instruction
4508 // with a new one.
4509 if (MakeNewInstruction) {
4510 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
4511 NewClauses.size());
4512 for (Constant *C : NewClauses)
4513 NLI->addClause(C);
4514 // A landing pad with no clauses must have the cleanup flag set. It is
4515 // theoretically possible, though highly unlikely, that we eliminated all
4516 // clauses. If so, force the cleanup flag to true.
4517 if (NewClauses.empty())
4518 CleanupFlag = true;
4519 NLI->setCleanup(CleanupFlag);
4520 return NLI;
4521 }
4522
4523 // Even if none of the clauses changed, we may nonetheless have understood
4524 // that the cleanup flag is pointless. Clear it if so.
4525 if (LI.isCleanup() != CleanupFlag) {
4526 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
4527 LI.setCleanup(CleanupFlag);
4528 return &LI;
4529 }
4530
4531 return nullptr;
4532}
4533
4534Value *
4535InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
4536 // Try to push freeze through instructions that propagate but don't produce
4537 // poison as far as possible. If an operand of freeze satisfies three
4538 // conditions: 1) it has one use, 2) it does not produce poison, and 3) all
4539 // but one of its operands are guaranteed non-poison, then push the freeze
4540 // through to the one operand that is not guaranteed non-poison. The actual
4541 // transform is as follows.
4542 // Op1 = ... ; Op1 can be poison
4543 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and all of its
4544 // ; operands except Op1 are guaranteed non-poison
4545 // ... = Freeze(Op0)
4546 // =>
4547 // Op1 = ...
4548 // Op1.fr = Freeze(Op1)
4549 // ... = Inst(Op1.fr, NonPoisonOps...)
4550 auto *OrigOp = OrigFI.getOperand(0);
4551 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4552
4553 // While we could change the other users of OrigOp to use freeze(OrigOp), that
4554 // potentially reduces their optimization potential, so let's only do this iff
4555 // the OrigOp is only used by the freeze.
4556 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4557 return nullptr;
4558
4559 // We can't push the freeze through an instruction which can itself create
4560 // poison. If the only source of new poison is flags, we can simply
4561 // strip them (since we know the only use is the freeze and nothing can
4562 // benefit from them.)
4563 if (canCreateUndefOrPoison(cast<Operator>(OrigOp),
4564 /*ConsiderFlagsAndMetadata*/ false))
4565 return nullptr;
4566
4567 // If operand is guaranteed not to be poison, there is no need to add freeze
4568 // to the operand. So we first find the operand that is not guaranteed to be
4569 // poison.
4570 Use *MaybePoisonOperand = nullptr;
4571 for (Use &U : OrigOpInst->operands()) {
4572 if (isa<MetadataAsValue>(U.get()) ||
4573 isGuaranteedNotToBeUndefOrPoison(U.get()))
4574 continue;
4575 if (!MaybePoisonOperand)
4576 MaybePoisonOperand = &U;
4577 else
4578 return nullptr;
4579 }
4580
4581 OrigOpInst->dropPoisonGeneratingAnnotations();
4582
4583 // If all operands are guaranteed to be non-poison, we can drop freeze.
4584 if (!MaybePoisonOperand)
4585 return OrigOp;
4586
4587 Builder.SetInsertPoint(OrigOpInst);
4588 auto *FrozenMaybePoisonOperand = Builder.CreateFreeze(
4589 MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
4590
4591 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4592 return OrigOp;
4593}
4594
4595Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
4596 PHINode *PN) {
4597 // Detect whether this is a recurrence with a start value and some number of
4598 // backedge values. We'll check whether we can push the freeze through the
4599 // backedge values (possibly dropping poison flags along the way) until we
4600 // reach the phi again. In that case, we can move the freeze to the start
4601 // value.
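// Illustrative example (annotation, not from the original source): for
//   %iv      = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.fr   = freeze i32 %iv
//   %iv.next = add nuw nsw i32 %iv, 1
// the freeze can be applied to %start instead (dropping the nuw/nsw flags on
// %iv.next), and the freeze of the phi is then no longer needed.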
4602 Use *StartU = nullptr;
4603 SmallVector<Value *> Worklist;
4604 for (Use &U : PN->incoming_values()) {
4605 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
4606 // Add backedge value to worklist.
4607 Worklist.push_back(U.get());
4608 continue;
4609 }
4610
4611 // Don't bother handling multiple start values.
4612 if (StartU)
4613 return nullptr;
4614 StartU = &U;
4615 }
4616
4617 if (!StartU || Worklist.empty())
4618 return nullptr; // Not a recurrence.
4619
4620 Value *StartV = StartU->get();
4621 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
4622 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
4623 // We can't insert freeze if the start value is the result of the
4624 // terminator (e.g. an invoke).
4625 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
4626 return nullptr;
4627
4628 SmallPtrSet<Value *, 32> Visited;
4629 SmallVector<Instruction *> DropFlags;
4630 while (!Worklist.empty()) {
4631 Value *V = Worklist.pop_back_val();
4632 if (!Visited.insert(V).second)
4633 continue;
4634
4635 if (Visited.size() > 32)
4636 return nullptr; // Limit the total number of values we inspect.
4637
4638 // Assume that PN is non-poison, because it will be after the transform.
4639 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
4640 continue;
4641
4642 Instruction *I = dyn_cast<Instruction>(V);
4643 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
4644 /*ConsiderFlagsAndMetadata*/ false))
4645 return nullptr;
4646
4647 DropFlags.push_back(I);
4648 append_range(Worklist, I->operands());
4649 }
4650
4651 for (Instruction *I : DropFlags)
4652 I->dropPoisonGeneratingAnnotations();
4653
4654 if (StartNeedsFreeze) {
4655 Builder.SetInsertPoint(StartBB->getTerminator());
4656 Value *FrozenStartV = Builder.CreateFreeze(StartV,
4657 StartV->getName() + ".fr");
4658 replaceUse(*StartU, FrozenStartV);
4659 }
4660 return replaceInstUsesWith(FI, PN);
4661}
4662
4663bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
4664 Value *Op = FI.getOperand(0);
4665
4666 if (isa<Constant>(Op) || Op->hasOneUse())
4667 return false;
4668
4669 // Move the freeze directly after the definition of its operand, so that
4670 // it dominates the maximum number of uses. Note that it may not dominate
4671 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
4672 // the normal/default destination. This is why the domination check in the
4673 // replacement below is still necessary.
4674 BasicBlock::iterator MoveBefore;
4675 if (isa<Argument>(Op)) {
4676 MoveBefore =
4677 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
4678 } else {
4679 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
4680 if (!MoveBeforeOpt)
4681 return false;
4682 MoveBefore = *MoveBeforeOpt;
4683 }
4684
4685 // Don't move to the position of a debug intrinsic.
4686 if (isa<DbgInfoIntrinsic>(MoveBefore))
4687 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4688 // Re-point iterator to come after any debug-info records, if we're
4689 // running in "RemoveDIs" mode
4690 MoveBefore.setHeadBit(false);
4691
4692 bool Changed = false;
4693 if (&FI != &*MoveBefore) {
4694 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
4695 Changed = true;
4696 }
4697
4698 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
4699 bool Dominates = DT.dominates(&FI, U);
4700 Changed |= Dominates;
4701 return Dominates;
4702 });
4703
4704 return Changed;
4705}
4706
4707// Check if any direct or bitcast user of this value is a shuffle instruction.
4708static bool isUsedWithinShuffleVector(Value *V) {
4709 for (auto *U : V->users()) {
4710 if (isa<ShuffleVectorInst>(U))
4711 return true;
4712 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
4713 return true;
4714 }
4715 return false;
4716}
4717
4718Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
4719 Value *Op0 = I.getOperand(0);
4720
4721 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
4722 return replaceInstUsesWith(I, V);
4723
4724 // freeze (phi const, x) --> phi const, (freeze x)
4725 if (auto *PN = dyn_cast<PHINode>(Op0)) {
4726 if (Instruction *NV = foldOpIntoPhi(I, PN))
4727 return NV;
4728 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
4729 return NV;
4730 }
4731
4732 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
4733 return replaceInstUsesWith(I, NI);
4734
4735 // If I is freeze(undef), check its uses and fold it to a fixed constant.
4736 // - or: pick -1
4737 // - select's condition: if the true value is constant, choose it by making
4738 // the condition true.
4739 // - default: pick 0
4740 //
4741 // Note that this transform is intentionally done here rather than
4742 // via an analysis in InstSimplify or at individual user sites. That is
4743 // because we must produce the same value for all uses of the freeze -
4744 // it's the reason "freeze" exists!
4745 //
4746 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
4747 // duplicating logic for binops at least.
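// Illustrative sketch (editorial addition, not part of the original source;
// value names are hypothetical): for
//   %f = freeze i32 undef
//   %a = or i32 %f, %x
//   %b = or i32 %f, %y
// every user is an 'or', so %f can be replaced by the constant -1; if another
// user preferred a different constant, the fold would fall back to 0 so that
// all users still observe the same value.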
4748 auto getUndefReplacement = [&I](Type *Ty) {
4749 Constant *BestValue = nullptr;
4750 Constant *NullValue = Constant::getNullValue(Ty);
4751 for (const auto *U : I.users()) {
4752 Constant *C = NullValue;
4753 if (match(U, m_Or(m_Value(), m_Value())))
4754 C = ConstantInt::getAllOnesValue(Ty);
4755 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
4756 C = ConstantInt::getTrue(Ty);
4757
4758 if (!BestValue)
4759 BestValue = C;
4760 else if (BestValue != C)
4761 BestValue = NullValue;
4762 }
4763 assert(BestValue && "Must have at least one use");
4764 return BestValue;
4765 };
4766
4767 if (match(Op0, m_Undef())) {
4768 // Don't fold freeze(undef/poison) if it's used as a vector operand in
4769 // a shuffle. This may improve codegen for shuffles that allow
4770 // unspecified inputs.
4771 if (isUsedWithinShuffleVector(&I))
4772 return nullptr;
4773 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
4774 }
4775
4776 Constant *C;
4777 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement()) {
4778 Constant *ReplaceC = getUndefReplacement(I.getType()->getScalarType());
4779 return replaceInstUsesWith(I, Constant::replaceUndefsWith(C, ReplaceC));
4780 }
4781
4782 // Replace uses of Op with freeze(Op).
4783 if (freezeOtherUses(I))
4784 return &I;
4785
4786 return nullptr;
4787}
4788
4789/// Check for case where the call writes to an otherwise dead alloca. This
4790/// shows up for unused out-params in idiomatic C/C++ code. Note that this
4791/// helper *only* analyzes the write; doesn't check any other legality aspect.
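// Illustrative sketch (editorial addition, not part of the original source;
// names are hypothetical): in
//   %out = alloca i32
//   call void @compute(ptr sret(i32) %out)   ; out-param is never read
// the call's only write lands in an alloca whose other users are just GEPs or
// addrspacecasts, so the write is not observable and need not block sinking.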
4792static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
4793 auto *CB = dyn_cast<CallBase>(I);
4794 if (!CB)
4795 // TODO: handle e.g. store to alloca here - only worth doing if we extend
4796 // to allow reload along used path as described below. Otherwise, this
4797 // is simply a store to a dead allocation which will be removed.
4798 return false;
4799 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
4800 if (!Dest)
4801 return false;
4802 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
4803 if (!AI)
4804 // TODO: allow malloc?
4805 return false;
4806 // TODO: allow memory access dominated by move point? Note that since AI
4807 // could have a reference to itself captured by the call, we would need to
4808 // account for cycles in doing so.
4809 SmallVector<const User *> AllocaUsers;
4810 SmallPtrSet<const User *, 4> Visited;
4811 auto pushUsers = [&](const Instruction &I) {
4812 for (const User *U : I.users()) {
4813 if (Visited.insert(U).second)
4814 AllocaUsers.push_back(U);
4815 }
4816 };
4817 pushUsers(*AI);
4818 while (!AllocaUsers.empty()) {
4819 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
4820 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4821 pushUsers(*UserI);
4822 continue;
4823 }
4824 if (UserI == CB)
4825 continue;
4826 // TODO: support lifetime.start/end here
4827 return false;
4828 }
4829 return true;
4830}
4831
4832/// Try to move the specified instruction from its current block into the
4833/// beginning of DestBlock, which can only happen if it's safe to move the
4834/// instruction past all of the instructions between it and the end of its
4835/// block.
4836bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
4837 BasicBlock *DestBlock) {
4838 BasicBlock *SrcBlock = I->getParent();
4839
4840 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
4841 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
4842 I->isTerminator())
4843 return false;
4844
4845 // Do not sink static or dynamic alloca instructions. Static allocas must
4846 // remain in the entry block, and dynamic allocas must not be sunk in between
4847 // a stacksave / stackrestore pair, which would incorrectly shorten its
4848 // lifetime.
4849 if (isa<AllocaInst>(I))
4850 return false;
4851
4852 // Do not sink into catchswitch blocks.
4853 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
4854 return false;
4855
4856 // Do not sink convergent call instructions.
4857 if (auto *CI = dyn_cast<CallInst>(I)) {
4858 if (CI->isConvergent())
4859 return false;
4860 }
4861
4862 // Unless we can prove that the memory write isn't visible except on the
4863 // path we're sinking to, we must bail.
4864 if (I->mayWriteToMemory()) {
4865 if (!SoleWriteToDeadLocal(I, TLI))
4866 return false;
4867 }
4868
4869 // We can only sink load instructions if there is nothing between the load and
4870 // the end of block that could change the value.
4871 if (I->mayReadFromMemory() &&
4872 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
4873 // We don't want to do any sophisticated alias analysis, so we only check
4874 // the instructions after I in I's parent block if we try to sink to its
4875 // successor block.
4876 if (DestBlock->getUniquePredecessor() != I->getParent())
4877 return false;
4878 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
4879 E = I->getParent()->end();
4880 Scan != E; ++Scan)
4881 if (Scan->mayWriteToMemory())
4882 return false;
4883 }
4884
4885 I->dropDroppableUses([&](const Use *U) {
4886 auto *I = dyn_cast<Instruction>(U->getUser());
4887 if (I && I->getParent() != DestBlock) {
4888 Worklist.add(I);
4889 return true;
4890 }
4891 return false;
4892 });
4893 /// FIXME: We could remove droppable uses that are not dominated by
4894 /// the new position.
4895
4896 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
4897 I->moveBefore(*DestBlock, InsertPos);
4898 ++NumSunkInst;
4899
4900 // Also sink all related debug uses from the source basic block. Otherwise we
4901 // get a debug use before the def. Attempt to salvage debug uses first, to
4902 // maximise the range over which variables have a location. If we cannot salvage, then
4903 // mark the location undef: we know it was supposed to receive a new location
4904 // here, but that computation has been sunk.
4905 SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
4906 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
4907 findDbgUsers(DbgUsers, I, &DbgVariableRecords);
4908 if (!DbgUsers.empty())
4909 tryToSinkInstructionDbgValues(I, InsertPos, SrcBlock, DestBlock, DbgUsers);
4910 if (!DbgVariableRecords.empty())
4911 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
4912 DbgVariableRecords);
4913
4914 // PS: there are numerous flaws with this behaviour, not least that right now
4915 // assignments can be re-ordered past other assignments to the same variable
4916 // if they use different Values. Creating more undef assignments can never be
4917 // undone. And salvaging all users outside of this block can unnecessarily
4918 // alter the lifetime of the live-value that the variable refers to.
4919 // Some of these things can be resolved by tolerating debug use-before-defs in
4920 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
4921 // being used for more architectures.
4922
4923 return true;
4924}
4925
4926void InstCombinerImpl::tryToSinkInstructionDbgValues(
4927 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4928 BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers) {
4929 // For all debug values in the destination block, the sunk instruction
4930 // will still be available, so they do not need to be dropped.
4931 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSalvage;
4932 for (auto &DbgUser : DbgUsers)
4933 if (DbgUser->getParent() != DestBlock)
4934 DbgUsersToSalvage.push_back(DbgUser);
4935
4936 // Process the sinking DbgUsersToSalvage in reverse order, as we only want
4937 // to clone the last appearing debug intrinsic for each given variable.
4938 SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
4939 for (DbgVariableIntrinsic *DVI : DbgUsersToSalvage)
4940 if (DVI->getParent() == SrcBlock)
4941 DbgUsersToSink.push_back(DVI);
4942 llvm::sort(DbgUsersToSink,
4943 [](auto *A, auto *B) { return B->comesBefore(A); });
4944
4945 SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
4946 SmallSet<DebugVariable, 4> SunkVariables;
4947 for (auto *User : DbgUsersToSink) {
4948 // A dbg.declare instruction should not be cloned, since there can only be
4949 // one per variable fragment. It should be left in the original place
4950 // because the sunk instruction is not an alloca (otherwise we could not be
4951 // here).
4952 if (isa<DbgDeclareInst>(User))
4953 continue;
4954
4955 DebugVariable DbgUserVariable =
4956 DebugVariable(User->getVariable(), User->getExpression(),
4957 User->getDebugLoc()->getInlinedAt());
4958
4959 if (!SunkVariables.insert(DbgUserVariable).second)
4960 continue;
4961
4962 // Leave dbg.assign intrinsics in their original positions and there should
4963 // be no need to insert a clone.
4964 if (isa<DbgAssignIntrinsic>(User))
4965 continue;
4966
4967 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
4968 if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
4969 DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
4970 LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
4971 }
4972
4973 // Perform salvaging without the clones, then sink the clones.
4974 if (!DIIClones.empty()) {
4975 salvageDebugInfoForDbgValues(*I, DbgUsersToSalvage, {});
4976 // The clones are in reverse order of original appearance, reverse again to
4977 // maintain the original order.
4978 for (auto &DIIClone : llvm::reverse(DIIClones)) {
4979 DIIClone->insertBefore(&*InsertPos);
4980 LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
4981 }
4982 }
4983}
4984
4985void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
4986 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
4987 BasicBlock *DestBlock,
4988 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
4989 // Implementation of tryToSinkInstructionDbgValues, but for the
4990 // DbgVariableRecord of variable assignments rather than dbg.values.
4991
4992 // Fetch all DbgVariableRecords not already in the destination.
4993 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
4994 for (auto &DVR : DbgVariableRecords)
4995 if (DVR->getParent() != DestBlock)
4996 DbgVariableRecordsToSalvage.push_back(DVR);
4997
4998 // Fetch a second collection, of DbgVariableRecords in the source block that
4999 // we're going to sink.
5000 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5001 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5002 if (DVR->getParent() == SrcBlock)
5003 DbgVariableRecordsToSink.push_back(DVR);
5004
5005 // Sort DbgVariableRecords according to their position in the block. This is a
5006 // partial order: DbgVariableRecords attached to different instructions will
5007 // be ordered by the instruction order, but DbgVariableRecords attached to the
5008 // same instruction won't have an order.
5009 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5010 return B->getInstruction()->comesBefore(A->getInstruction());
5011 };
5012 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5013
5014 // If there are two assignments to the same variable attached to the same
5015 // instruction, the ordering between the two assignments is important. Scan
5016 // for this (rare) case and establish which is the last assignment.
5017 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5018 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5019 if (DbgVariableRecordsToSink.size() > 1) {
5020 SmallDenseMap<InstVarPair, unsigned> CountMap;
5021 // Count how many assignments to each variable there are per instruction.
5022 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5023 DebugVariable DbgUserVariable =
5024 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5025 DVR->getDebugLoc()->getInlinedAt());
5026 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5027 }
5028
5029 // If there are any instructions with two assignments, add them to the
5030 // FilterOutMap to record that they need extra filtering.
5031 SmallPtrSet<const Instruction *, 4> DupSet;
5032 for (auto It : CountMap) {
5033 if (It.second > 1) {
5034 FilterOutMap[It.first] = nullptr;
5035 DupSet.insert(It.first.first);
5036 }
5037 }
5038
5039 // For all instruction/variable pairs needing extra filtering, find the
5040 // latest assignment.
5041 for (const Instruction *Inst : DupSet) {
5042 for (DbgVariableRecord &DVR :
5043 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5044 DebugVariable DbgUserVariable =
5045 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5046 DVR.getDebugLoc()->getInlinedAt());
5047 auto FilterIt =
5048 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5049 if (FilterIt == FilterOutMap.end())
5050 continue;
5051 if (FilterIt->second != nullptr)
5052 continue;
5053 FilterIt->second = &DVR;
5054 }
5055 }
5056 }
5057
5058 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5059 // out any duplicate assignments identified above.
5060 SmallVector<DbgVariableRecord *, 2> DVRClones;
5061 SmallSet<DebugVariable, 4> SunkVariables;
5062 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5064 continue;
5065
5066 DebugVariable DbgUserVariable =
5067 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5068 DVR->getDebugLoc()->getInlinedAt());
5069
5070 // For any variable where there were multiple assignments in the same place,
5071 // ignore all but the last assignment.
5072 if (!FilterOutMap.empty()) {
5073 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5074 auto It = FilterOutMap.find(IVP);
5075
5076 // Filter out.
5077 if (It != FilterOutMap.end() && It->second != DVR)
5078 continue;
5079 }
5080
5081 if (!SunkVariables.insert(DbgUserVariable).second)
5082 continue;
5083
5084 if (DVR->isDbgAssign())
5085 continue;
5086
5087 DVRClones.emplace_back(DVR->clone());
5088 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5089 }
5090
5091 // Perform salvaging without the clones, then sink the clones.
5092 if (DVRClones.empty())
5093 return;
5094
5095 salvageDebugInfoForDbgValues(*I, {}, DbgVariableRecordsToSalvage);
5096
5097 // The clones are in reverse order of original appearance. Assert that the
5098 // head bit is set on the iterator as we _should_ have received it via
5099 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5100 // we'll repeatedly insert at the head, such as:
5101 // DVR-3 (third insertion goes here)
5102 // DVR-2 (second insertion goes here)
5103 // DVR-1 (first insertion goes here)
5104 // Any-Prior-DVRs
5105 // InsertPtInst
5106 assert(InsertPos.getHeadBit());
5107 for (DbgVariableRecord *DVRClone : DVRClones) {
5108 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5109 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5110 }
5111}
5112
5113bool InstCombinerImpl::run() {
5114 while (!Worklist.isEmpty()) {
5115 // Walk deferred instructions in reverse order, and push them to the
5116 // worklist, which means they'll end up popped from the worklist in-order.
5117 while (Instruction *I = Worklist.popDeferred()) {
5118 // Check to see if we can DCE the instruction. We do this already here to
5119 // reduce the number of uses and thus allow other folds to trigger.
5120 // Note that eraseInstFromFunction() may push additional instructions on
5121 // the deferred worklist, so this will DCE whole instruction chains.
5122 if (isInstructionTriviallyDead(I, &TLI)) {
5123 eraseInstFromFunction(*I);
5124 ++NumDeadInst;
5125 continue;
5126 }
5127
5128 Worklist.push(I);
5129 }
5130
5131 Instruction *I = Worklist.removeOne();
5132 if (I == nullptr) continue; // skip null values.
5133
5134 // Check to see if we can DCE the instruction.
5135 if (isInstructionTriviallyDead(I, &TLI)) {
5136 eraseInstFromFunction(*I);
5137 ++NumDeadInst;
5138 continue;
5139 }
5140
5141 if (!DebugCounter::shouldExecute(VisitCounter))
5142 continue;
5143
5144 // See if we can trivially sink this instruction to its user if we can
5145 // prove that the successor is not executed more frequently than our block.
5146 // Return the UserBlock if successful.
5147 auto getOptionalSinkBlockForInst =
5148 [this](Instruction *I) -> std::optional<BasicBlock *> {
5149 if (!EnableCodeSinking)
5150 return std::nullopt;
5151
5152 BasicBlock *BB = I->getParent();
5153 BasicBlock *UserParent = nullptr;
5154 unsigned NumUsers = 0;
5155
5156 for (Use &U : I->uses()) {
5157 User *User = U.getUser();
5158 if (User->isDroppable())
5159 continue;
5160 if (NumUsers > MaxSinkNumUsers)
5161 return std::nullopt;
5162
5163 Instruction *UserInst = cast<Instruction>(User);
5164 // Special handling for Phi nodes - get the block the use occurs in.
5165 BasicBlock *UserBB = UserInst->getParent();
5166 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5167 UserBB = PN->getIncomingBlock(U);
5168 // Bail out if we have uses in different blocks. We don't do any
5169 // sophisticated analysis (i.e. finding NearestCommonDominator of these
5170 // use blocks).
5171 if (UserParent && UserParent != UserBB)
5172 return std::nullopt;
5173 UserParent = UserBB;
5174
5175 // Make sure these checks are done only once. Naturally, we do them the
5176 // first time we get the UserParent; this saves compile time.
5177 if (NumUsers == 0) {
5178 // Try sinking to another block. If that block is unreachable, then do
5179 // not bother. SimplifyCFG should handle it.
5180 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5181 return std::nullopt;
5182
5183 auto *Term = UserParent->getTerminator();
5184 // See if the user is one of our successors that has only one
5185 // predecessor, so that we don't have to split the critical edge.
5186 // Another option where we can sink is a block that ends with a
5187 // terminator that does not pass control to other block (such as
5188 // return or unreachable or resume). In this case:
5189 // - I dominates the User (by SSA form);
5190 // - the User will be executed at most once.
5191 // So sinking I down to User is always profitable or neutral.
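// Illustrative sketch (editorial addition, not part of the original source;
// block and value names are hypothetical):
//   bb:
//     %v = mul i32 %a, %b
//     br i1 %c, label %use.bb, label %other.bb
//   use.bb:                                  ; unique predecessor is %bb
//     store i32 %v, ptr %p
// %v's only user lives in %use.bb, so it can be sunk there and is no longer
// executed on the %other.bb path.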
5192 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5193 return std::nullopt;
5194
5195 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5196 }
5197
5198 NumUsers++;
5199 }
5200
5201 // No user or only has droppable users.
5202 if (!UserParent)
5203 return std::nullopt;
5204
5205 return UserParent;
5206 };
5207
5208 auto OptBB = getOptionalSinkBlockForInst(I);
5209 if (OptBB) {
5210 auto *UserParent = *OptBB;
5211 // Okay, the CFG is simple enough, try to sink this instruction.
5212 if (tryToSinkInstruction(I, UserParent)) {
5213 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5214 MadeIRChange = true;
5215 // We'll add uses of the sunk instruction below, but since
5216 // sinking can expose opportunities for its *operands*, add
5217 // them to the worklist.
5218 for (Use &U : I->operands())
5219 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5220 Worklist.push(OpI);
5221 }
5222 }
5223
5224 // Now that we have an instruction, try combining it to simplify it.
5225 Builder.SetInsertPoint(I);
5226 Builder.CollectMetadataToCopy(
5227 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5228
5229#ifndef NDEBUG
5230 std::string OrigI;
5231#endif
5232 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5233 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5234
5235 if (Instruction *Result = visit(*I)) {
5236 ++NumCombined;
5237 // Should we replace the old instruction with a new one?
5238 if (Result != I) {
5239 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5240 << " New = " << *Result << '\n');
5241
5242 // We copy the old instruction's DebugLoc to the new instruction, unless
5243 // InstCombine already assigned a DebugLoc to it, in which case we
5244 // should trust the more specifically selected DebugLoc.
5245 if (!Result->getDebugLoc())
5246 Result->setDebugLoc(I->getDebugLoc());
5247 // We also copy annotation metadata to the new instruction.
5248 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5249 // Everything uses the new instruction now.
5250 I->replaceAllUsesWith(Result);
5251
5252 // Move the name to the new instruction first.
5253 Result->takeName(I);
5254
5255 // Insert the new instruction into the basic block...
5256 BasicBlock *InstParent = I->getParent();
5257 BasicBlock::iterator InsertPos = I->getIterator();
5258
5259 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5260 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5261 // We need to fix up the insertion point.
5262 if (isa<PHINode>(I)) // PHI -> Non-PHI
5263 InsertPos = InstParent->getFirstInsertionPt();
5264 else // Non-PHI -> PHI
5265 InsertPos = InstParent->getFirstNonPHIIt();
5266 }
5267
5268 Result->insertInto(InstParent, InsertPos);
5269
5270 // Push the new instruction and any users onto the worklist.
5271 Worklist.pushUsersToWorkList(*Result);
5272 Worklist.push(Result);
5273
5274 eraseInstFromFunction(*I);
5275 } else {
5276 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5277 << " New = " << *I << '\n');
5278
5279 // If the instruction was modified, it's possible that it is now dead.
5280 // If so, remove it.
5281 if (isInstructionTriviallyDead(I, &TLI)) {
5282 eraseInstFromFunction(*I);
5283 } else {
5284 Worklist.pushUsersToWorkList(*I);
5285 Worklist.push(I);
5286 }
5287 }
5288 MadeIRChange = true;
5289 }
5290 }
5291
5292 Worklist.zap();
5293 return MadeIRChange;
5294}
5295
5296// Track the scopes used by !alias.scope and !noalias. In a function, a
5297// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5298// by both sets. If not, the declaration of the scope can be safely omitted.
5299// The MDNode of the scope can be omitted as well for the instructions that are
5300// part of this function. We do not do that at this point, as this might become
5301// too time consuming to do.
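// Illustrative sketch (editorial addition, not part of the original source;
// metadata names are hypothetical): given
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
//   %v = load i32, ptr %p, !alias.scope !2
// with no instruction carrying !noalias !2, the scope is never used by both
// metadata kinds, so the declaration can be dropped.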
5302class AliasScopeTracker {
5303 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5304 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5305
5306public:
5307 void analyse(Instruction *I) {
5308 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5309 if (!I->hasMetadataOtherThanDebugLoc())
5310 return;
5311
5312 auto Track = [](Metadata *ScopeList, auto &Container) {
5313 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5314 if (!MDScopeList || !Container.insert(MDScopeList).second)
5315 return;
5316 for (const auto &MDOperand : MDScopeList->operands())
5317 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5318 Container.insert(MDScope);
5319 };
5320
5321 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5322 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5323 }
5324
5325 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5326 NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5327 if (!Decl)
5328 return false;
5329
5330 assert(Decl->use_empty() &&
5331 "llvm.experimental.noalias.scope.decl in use ?");
5332 const MDNode *MDSL = Decl->getScopeList();
5333 assert(MDSL->getNumOperands() == 1 &&
5334 "llvm.experimental.noalias.scope should refer to a single scope");
5335 auto &MDOperand = MDSL->getOperand(0);
5336 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5337 return !UsedAliasScopesAndLists.contains(MD) ||
5338 !UsedNoAliasScopesAndLists.contains(MD);
5339
5340 // Not an MDNode? Throw it away.
5341 return true;
5342 }
5343};
5344
5345/// Populate the IC worklist from a function, by walking it in reverse
5346/// post-order and adding all reachable code to the worklist.
5347///
5348/// This has a couple of tricks to make the code faster and more powerful. In
5349/// particular, we constant fold and DCE instructions as we go, to avoid adding
5350/// them to the worklist (this significantly speeds up instcombine on code where
5351/// many instructions are dead or constant). Additionally, if we find a branch
5352/// whose condition is a known constant, we only visit the reachable successors.
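// Illustrative sketch (editorial addition, not part of the original source):
// given
//   br i1 true, label %live, label %dead
// only %live is added to the worklist; edges into %dead are recorded as dead
// and its instructions are removed later rather than being visited.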
5353bool InstCombinerImpl::prepareWorklist(Function &F) {
5354 bool MadeIRChange = false;
5355 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5356 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5357 DenseMap<Constant *, Constant *> FoldedConstants;
5358 AliasScopeTracker SeenAliasScopes;
5359
5360 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5361 for (BasicBlock *Succ : successors(BB))
5362 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5363 for (PHINode &PN : Succ->phis())
5364 for (Use &U : PN.incoming_values())
5365 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5366 U.set(PoisonValue::get(PN.getType()));
5367 MadeIRChange = true;
5368 }
5369 };
5370
5371 for (BasicBlock *BB : RPOT) {
5372 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5373 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5374 })) {
5375 HandleOnlyLiveSuccessor(BB, nullptr);
5376 continue;
5377 }
5378 LiveBlocks.insert(BB);
5379
5380 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5381 // ConstantProp instruction if trivially constant.
5382 if (!Inst.use_empty() &&
5383 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5384 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5385 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5386 << '\n');
5387 Inst.replaceAllUsesWith(C);
5388 ++NumConstProp;
5389 if (isInstructionTriviallyDead(&Inst, &TLI))
5390 Inst.eraseFromParent();
5391 MadeIRChange = true;
5392 continue;
5393 }
5394
5395 // See if we can constant fold its operands.
5396 for (Use &U : Inst.operands()) {
5397 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5398 continue;
5399
5400 auto *C = cast<Constant>(U);
5401 Constant *&FoldRes = FoldedConstants[C];
5402 if (!FoldRes)
5403 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5404
5405 if (FoldRes != C) {
5406 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5407 << "\n Old = " << *C
5408 << "\n New = " << *FoldRes << '\n');
5409 U = FoldRes;
5410 MadeIRChange = true;
5411 }
5412 }
5413
5414 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5415 // these call instructions consumes non-trivial amount of time and
5416 // provides no value for the optimization.
5417 if (!Inst.isDebugOrPseudoInst()) {
5418 InstrsForInstructionWorklist.push_back(&Inst);
5419 SeenAliasScopes.analyse(&Inst);
5420 }
5421 }
5422
5423 // If this is a branch or switch on a constant, mark only the single
5424 // live successor. Otherwise assume all successors are live.
5425 Instruction *TI = BB->getTerminator();
5426 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5427 if (isa<UndefValue>(BI->getCondition())) {
5428 // Branch on undef is UB.
5429 HandleOnlyLiveSuccessor(BB, nullptr);
5430 continue;
5431 }
5432 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5433 bool CondVal = Cond->getZExtValue();
5434 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5435 continue;
5436 }
5437 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5438 if (isa<UndefValue>(SI->getCondition())) {
5439 // Switch on undef is UB.
5440 HandleOnlyLiveSuccessor(BB, nullptr);
5441 continue;
5442 }
5443 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5444 HandleOnlyLiveSuccessor(BB,
5445 SI->findCaseValue(Cond)->getCaseSuccessor());
5446 continue;
5447 }
5448 }
5449 }
5450
5451 // Remove instructions inside unreachable blocks. This prevents the
5452 // instcombine code from having to deal with some bad special cases, and
5453 // reduces use counts of instructions.
5454 for (BasicBlock &BB : F) {
5455 if (LiveBlocks.count(&BB))
5456 continue;
5457
5458 unsigned NumDeadInstInBB;
5459 unsigned NumDeadDbgInstInBB;
5460 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5461 removeAllNonTerminatorAndEHPadInstructions(&BB);
5462
5463 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5464 NumDeadInst += NumDeadInstInBB;
5465 }
5466
5467 // Once we've found all of the instructions to add to instcombine's worklist,
5468 // add them in reverse order. This way instcombine will visit from the top
5469 // of the function down. This jives well with the way that it adds all uses
5470 // of instructions to the worklist after doing a transformation, thus avoiding
5471 // some N^2 behavior in pathological cases.
5472 Worklist.reserve(InstrsForInstructionWorklist.size());
5473 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5474 // DCE instruction if trivially dead. As we iterate in reverse program
5475 // order here, we will clean up whole chains of dead instructions.
5476 if (isInstructionTriviallyDead(Inst, &TLI) ||
5477 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5478 ++NumDeadInst;
5479 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5480 salvageDebugInfo(*Inst);
5481 Inst->eraseFromParent();
5482 MadeIRChange = true;
5483 continue;
5484 }
5485
5486 Worklist.push(Inst);
5487 }
5488
5489 return MadeIRChange;
5490}
5491
5492void InstCombiner::computeBackEdges() {
5493 // Collect backedges.
5494 SmallPtrSet<BasicBlock *, 16> Visited;
5495 for (BasicBlock *BB : RPOT) {
5496 Visited.insert(BB);
5497 for (BasicBlock *Succ : successors(BB))
5498 if (Visited.contains(Succ))
5499 BackEdges.insert({BB, Succ});
5500 }
5501 ComputedBackEdges = true;
5502}
5503
5504static bool combineInstructionsOverFunction(
5505 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5506 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5507 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5508 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5509 const InstCombineOptions &Opts) {
5510 auto &DL = F.getDataLayout();
5511 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5512 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5513
5514 /// Builder - This is an IRBuilder that automatically inserts new
5515 /// instructions into the worklist when they are created.
5516 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
5517 F.getContext(), TargetFolder(DL),
5518 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5519 Worklist.add(I);
5520 if (auto *Assume = dyn_cast<AssumeInst>(I))
5521 AC.registerAssumption(Assume);
5522 }));
5523
5524 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5525
5526 // Lower dbg.declare intrinsics otherwise their value may be clobbered
5527 // by instcombiner.
5528 bool MadeIRChange = false;
5529 if (ShouldLowerDbgDeclare)
5530 MadeIRChange = LowerDbgDeclare(F);
5531
5532 // Iterate while there is work to do.
5533 unsigned Iteration = 0;
5534 while (true) {
5535 ++Iteration;
5536
5537 if (Iteration > Opts.MaxIterations && !VerifyFixpoint) {
5538 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5539 << " on " << F.getName()
5540 << " reached; stopping without verifying fixpoint\n");
5541 break;
5542 }
5543
5544 ++NumWorklistIterations;
5545 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
5546 << F.getName() << "\n");
5547
5548 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
5549 ORE, BFI, BPI, PSI, DL, RPOT);
5550 IC.MaxArraySizeForCombine = MaxArraySize;
5551 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
5552 MadeChangeInThisIteration |= IC.run();
5553 if (!MadeChangeInThisIteration)
5554 break;
5555
5556 MadeIRChange = true;
5557 if (Iteration > Opts.MaxIterations) {
5559 "Instruction Combining on " + Twine(F.getName()) +
5560 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
5561 " iterations. " +
5562 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5563 "'instcombine-no-verify-fixpoint' to suppress this error.",
5564 /*GenCrashDiag=*/false);
5565 }
5566 }
5567
5568 if (Iteration == 1)
5569 ++NumOneIteration;
5570 else if (Iteration == 2)
5571 ++NumTwoIterations;
5572 else if (Iteration == 3)
5573 ++NumThreeIterations;
5574 else
5575 ++NumFourOrMoreIterations;
5576
5577 return MadeIRChange;
5578}
5579
5581
5582void InstCombinePass::printPipeline(
5583 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
5584 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
5585 OS, MapClassName2PassName);
5586 OS << '<';
5587 OS << "max-iterations=" << Options.MaxIterations << ";";
5588 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
5589 OS << '>';
5590}
5591
5592char InstCombinePass::ID = 0;
5593
5594PreservedAnalyses InstCombinePass::run(Function &F,
5595 FunctionAnalysisManager &AM) {
5596 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
5597 // No changes since last InstCombine pass, exit early.
5598 if (LRT.shouldSkip(&ID))
5599 return PreservedAnalyses::all();
5600
5601 auto &AC = AM.getResult<AssumptionAnalysis>(F);
5602 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
5603 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
5604 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5605 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
5606
5607 auto *AA = &AM.getResult<AAManager>(F);
5608 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
5609 ProfileSummaryInfo *PSI =
5610 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
5611 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5612 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
5613 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
5614
5615 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5616 BFI, BPI, PSI, Options)) {
5617 // No changes, all analyses are preserved.
5618 LRT.update(&ID, /*Changed=*/false);
5619 return PreservedAnalyses::all();
5620 }
5621
5622 // Mark all the analyses that instcombine updates as preserved.
5623 PreservedAnalyses PA;
5624 LRT.update(&ID, /*Changed=*/true);
5625 PA.preserveSet<CFGAnalyses>();
5626 PA.preserve<LastRunTrackingAnalysis>();
5627 return PA;
5628}
5629
5630void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
5631 AU.setPreservesCFG();
5644}
5645
5646bool InstructionCombiningPass::runOnFunction(Function &F) {
5647 if (skipFunction(F))
5648 return false;
5649
5650 // Required analyses.
5651 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5652 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5653 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
5654 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5655 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5656 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5657
5658 // Optional analyses.
5659 ProfileSummaryInfo *PSI =
5660 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5661 BlockFrequencyInfo *BFI =
5662 (PSI && PSI->hasProfileSummary()) ?
5663 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5664 nullptr;
5665 BranchProbabilityInfo *BPI = nullptr;
5666 if (auto *WrapperPass =
5667 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5668 BPI = &WrapperPass->getBPI();
5669
5670 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
5671 BFI, BPI, PSI, InstCombineOptions());
5672}
5673
5674char InstructionCombiningPass::ID = 0;
5675
5676InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {
5677 initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
5678}
5679
5681 "Combine redundant instructions", false, false)
5693
5694// Initialization Routines
5695void llvm::initializeInstCombine(PassRegistry &Registry) {
5696 initializeInstructionCombiningPassPass(Registry);
5697}
5698
5699FunctionPass *llvm::createInstructionCombiningPass() {
5700 return new InstructionCombiningPass();
5701}
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:890
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:500
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2398
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2429
Value * CreateNot(Value *V, const Twine &Name="")
Definition: IRBuilder.h:1751
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1381
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition: IRBuilder.h:1792
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition: IRBuilder.h:2527
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1512
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1364
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2013
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:1665
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition: IRBuilder.h:2219
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:194
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:1493
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1556
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2374
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
Definition: IRBuilder.h:1688
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:530
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition: IRBuilder.h:516
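A short sketch of typical IRBuilder usage, assuming an insertion point InsertPt and two i32 values X and Y (hypothetical names):
  IRBuilder<> B(InsertPt);
  Value *Sum    = B.CreateAdd(X, Y, "sum");
  Value *IsZero = B.CreateICmp(ICmpInst::ICMP_EQ, Sum, B.getInt32(0));
  Value *Sel    = B.CreateSelect(IsZero, X, B.CreateNot(Y), "sel");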
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition: IRBuilder.h:74
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
Definition: InstCombiner.h:48
SimplifyQuery SQ
Definition: InstCombiner.h:77
const DataLayout & getDataLayout() const
Definition: InstCombiner.h:337
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
Definition: InstCombiner.h:228
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Definition: InstCombiner.h:143
TargetLibraryInfo & TLI
Definition: InstCombiner.h:74
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Definition: InstCombiner.h:368
AAResults * AA
Definition: InstCombiner.h:70
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
Definition: InstCombiner.h:388
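A sketch of the usual visitor protocol this routine supports, written as a fragment inside a hypothetical visitXxx(BinaryOperator &I) member (simplifyBinOp, SQ and getWithInstruction are listed elsewhere on this page):
  if (Value *V = simplifyBinOp(I.getOpcode(), I.getOperand(0), I.getOperand(1),
                               SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);       // combiner-aware RAUW
  return nullptr;                           // no change made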
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
Definition: InstCombiner.h:56
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
Definition: InstCombiner.h:187
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
Definition: InstCombiner.h:420
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
Definition: InstCombiner.h:160
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Definition: InstCombiner.h:65
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
Definition: InstCombiner.h:377
BranchProbabilityInfo * BPI
Definition: InstCombiner.h:80
ReversePostOrderTraversal< BasicBlock * > & RPOT
Definition: InstCombiner.h:84
const DataLayout & DL
Definition: InstCombiner.h:76
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Definition: InstCombiner.h:455
DomConditionCache DC
Definition: InstCombiner.h:82
const bool MinimizeSize
Definition: InstCombiner.h:68
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Definition: InstCombiner.h:332
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
Definition: InstCombiner.h:97
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
Definition: InstCombiner.h:412
DominatorTree & DT
Definition: InstCombiner.h:75
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
Definition: InstCombiner.h:280
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
Definition: InstCombiner.h:89
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
Definition: InstCombiner.h:433
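A sketch of querying known bits from inside the combiner, assuming an integer value V and a context instruction CxtI (both hypothetical):
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, /*Depth=*/0, CxtI);
  if (Known.countMinLeadingZeros() >= 1) {
    // the most significant bit of V is known to be zero
  }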
BuilderTy & Builder
Definition: InstCombiner.h:61
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
Definition: InstCombiner.h:209
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
Definition: InstCombiner.h:358
void visit(Iterator Start, Iterator End)
Definition: InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition: InstCombine.h:66
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
void zap()
Check that the worklist is empty and nuke the backing store for the map.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
Definition: Instruction.h:328
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:475
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:68
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1764
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:72
bool isTerminator() const
Definition: Instruction.h:277
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
Definition: Instruction.h:333
bool isShift() const
Definition: Instruction.h:282
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:472
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
bool isIntDivRem() const
Definition: Instruction.h:280
Class to represent integer types.
Definition: DerivedTypes.h:42
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause at index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Definition: Instructions.h:176
Metadata node.
Definition: Metadata.h:1069
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1430
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1436
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:891
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition: Metadata.h:62
This class represents min/max intrinsics.
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
An analysis over an "inner" IR unit that provides access to an analysis manager over an "outer" IR uni...
Definition: PassManager.h:692
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
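A minimal sketch of building a two-input phi, assuming values V1/V2 arriving from blocks BB1/BB2 (illustrative names):
  PHINode *PN = PHINode::Create(V1->getType(), /*NumReservedValues=*/2, "merge");
  PN->addIncoming(V1, BB1);
  PN->addIncoming(V2, BB2);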
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition: Constants.h:1460
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1878
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition: Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition: SmallPtrSet.h:94
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:452
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:458
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
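A sketch of the insert().second idiom for visited-set style de-duplication, assuming a starting block BB (hypothetical):
  SmallPtrSet<BasicBlock *, 8> Visited;
  SmallVector<BasicBlock *, 8> Worklist;
  for (BasicBlock *Succ : successors(BB))
    if (Visited.insert(Succ).second)        // true only on first insertion
      Worklist.push_back(Succ);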
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
iterator erase(const_iterator CI)
Definition: SmallVector.h:737
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
typename SuperClass::iterator iterator
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
An instruction for storing to memory.
Definition: Instructions.h:292
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Definition: TargetFolder.h:34
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const
Targets can implement their own combinations for target-specific intrinsics.
std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const
Can be used to implement target-specific instruction combining.
std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const
Can be used to implement target-specific instruction combining.
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Query the target whether the specified address space cast from FromAS to ToAS is valid.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:270
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Definition: Type.h:258
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:310
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:355
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
op_range operands()
Definition: User.h:288
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition: User.cpp:21
op_iterator op_begin()
Definition: User.h:280
Value * getOperand(unsigned i) const
Definition: User.h:228
unsigned getNumOperands() const
Definition: User.h:250
op_iterator op_end()
Definition: User.h:282
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition: User.cpp:115
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:740
bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition: Value.cpp:157
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition: Value.h:434
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:694
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:852
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isZero() const
Definition: TypeSize.h:156
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition: ilist_node.h:32
reverse_self_iterator getReverseIterator()
Definition: ilist_node.h:135
self_iterator getIterator()
Definition: ilist_node.h:132
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition: Intrinsics.cpp:731
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
Definition: PatternMatch.h:160
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
Definition: PatternMatch.h:100
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:982
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
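A sketch of combining the matchers listed here to recognize (X + C1) << C2, where I is the instruction being visited and X/C1/C2 are outputs of the match (illustrative names):
  Value *X;
  const APInt *C1, *C2;
  if (match(&I, m_Shl(m_Add(m_Value(X), m_APInt(C1)), m_APInt(C2)))) {
    // fold using X, *C1 and *C2
  }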
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:826
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
Definition: PatternMatch.h:186
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
Definition: PatternMatch.h:305
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:864
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
Definition: PatternMatch.h:791
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
Definition: PatternMatch.h:152
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
@ FalseVal
Definition: TGLexer.h:59
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition: DWP.cpp:480
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition: STLExtras.h:854
void stable_sort(R &&Range)
Definition: STLExtras.h:2037
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Definition: CFG.h:255
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Don't use information from its non-constant operands.
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition: Local.cpp:2877
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2448
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition: Local.cpp:2316
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
Definition: DebugInfo.cpp:162
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1683
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2115
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
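A minimal sketch, assuming two Constant operands LHSC and RHSC and the DataLayout DL (hypothetical names); a null result means the operands did not fold:
  if (Constant *Res =
          ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, LHSC, RHSC, DL))
    return Res;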
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition: Local.cpp:2859
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
Definition: bit.h:146
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:406
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition: Local.cpp:22
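A one-line sketch, assuming the Builder and DL members listed above and a GEP user being rewritten (hypothetical variable):
  Value *Offset = emitGEPOffset(&Builder, DL, GEP);
  // Offset is an integer value equal to the byte offset the GEP computes.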
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:44
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1664
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into an appropriate set of llvm.dbg.value intrinsics.
Definition: Local.cpp:1990
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm....
Definition: Local.cpp:1731
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2787
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition: STLExtras.h:336
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition: STLExtras.h:2067
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:317
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:243
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:43
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:240
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition: PassManager.h:69
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const