InstructionCombining.cpp
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
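//
// For example (illustrative), canonicalizations 1 and 6 turn:
//    %a = mul i32 8, %X
// into:
//    %a = shl i32 %X, 3
// (the constant is moved to the RHS and the power-of-two multiply becomes a
// shift).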
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
136 cl::desc("Enable code sinking"),
137 cl::init(true));
138
139static cl::opt<unsigned> MaxSinkNumUsers(
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
143static cl::opt<unsigned>
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
147namespace llvm {
149} // end namespace llvm
150
151// FIXME: Remove this flag when it is no longer necessary to convert
152// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
153// increases variable availability at the cost of accuracy. Variables that
154// cannot be promoted by mem2reg or SROA will be described as living in memory
155// for their entire lifetime. However, passes like DSE and instcombine can
156// delete stores to the alloca, leading to misleading and inaccurate debug
157// information. This flag can be removed when those passes are fixed.
158static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
159 cl::Hidden, cl::init(true));
160
161std::optional<Instruction *>
162InstCombinerImpl::targetInstCombineIntrinsic(IntrinsicInst &II) {
163 // Handle target specific intrinsics
164 if (II.getCalledFunction()->isTargetIntrinsic()) {
165 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
166 }
167 return std::nullopt;
168}
169
170std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedUseBitsIntrinsic(
171 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
172 bool &KnownBitsComputed) {
173 // Handle target specific intrinsics
174 if (II.getCalledFunction()->isTargetIntrinsic()) {
175 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
176 *this, II, DemandedMask, Known, KnownBitsComputed);
177 }
178 return std::nullopt;
179}
180
181std::optional<Value *> InstCombinerImpl::targetSimplifyDemandedVectorEltsIntrinsic(
182 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
183 APInt &PoisonElts2, APInt &PoisonElts3,
184 std::function<void(Instruction *, unsigned, APInt, APInt &)>
185 SimplifyAndSetOp) {
186 // Handle target specific intrinsics
187 if (II.getCalledFunction()->isTargetIntrinsic()) {
188 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
189 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
190 SimplifyAndSetOp);
191 }
192 return std::nullopt;
193}
194
195bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
196 // Approved exception for TTI use: This queries a legality property of the
197 // target, not a profitability heuristic. Ideally this should be part of
198 // DataLayout instead.
199 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
200}
201
202Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
203 if (!RewriteGEP)
204 return llvm::emitGEPOffset(&Builder, DL, GEP);
205
206 IRBuilderBase::InsertPointGuard Guard(Builder);
207 auto *Inst = dyn_cast<Instruction>(GEP);
208 if (Inst)
209 Builder.SetInsertPoint(Inst);
210
211 Value *Offset = EmitGEPOffset(GEP);
212 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
213 if (Inst && !GEP->hasAllConstantIndices() &&
214 !GEP->getSourceElementType()->isIntegerTy(8)) {
215 replaceInstUsesWith(
216 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
217 Offset, "", GEP->getNoWrapFlags()));
218 eraseInstFromFunction(*Inst);
219 }
220 return Offset;
221}
222
223Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
224 GEPNoWrapFlags NW, Type *IdxTy,
225 bool RewriteGEPs) {
226 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
227 if (Sum)
228 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
229 NW.isInBounds());
230 else
231 return Offset;
232 };
233
234 Value *Sum = nullptr;
235 Value *OneUseSum = nullptr;
236 Value *OneUseBase = nullptr;
237 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
238 for (GEPOperator *GEP : reverse(GEPs)) {
239 Value *Offset;
240 {
241 // Expand the offset at the point of the previous GEP to enable rewriting.
242 // However, use the original insertion point for calculating Sum.
243 IRBuilderBase::InsertPointGuard Guard(Builder);
244 auto *Inst = dyn_cast<Instruction>(GEP);
245 if (RewriteGEPs && Inst)
246 Builder.SetInsertPoint(Inst);
247
248 Offset = EmitGEPOffset(GEP);
249 if (Offset->getType() != IdxTy)
250 Offset = Builder.CreateVectorSplat(
251 cast<VectorType>(IdxTy)->getElementCount(), Offset);
252 if (GEP->hasOneUse()) {
253 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
254 OneUseSum = Add(OneUseSum, Offset);
255 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
256 if (!OneUseBase)
257 OneUseBase = GEP->getPointerOperand();
258 continue;
259 }
260
261 if (OneUseSum)
262 Offset = Add(OneUseSum, Offset);
263
264 // Rewrite the GEP to reuse the computed offset. This also includes
265 // offsets from preceding one-use GEPs.
266 if (RewriteGEPs && Inst &&
267 !(GEP->getSourceElementType()->isIntegerTy(8) &&
268 GEP->getOperand(1) == Offset)) {
269 replaceInstUsesWith(
270 *Inst,
271 Builder.CreatePtrAdd(
272 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
273 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
274 eraseInstFromFunction(*Inst);
275 }
276 }
277
278 Sum = Add(Sum, Offset);
279 OneUseSum = OneUseBase = nullptr;
280 OneUseFlags = GEPNoWrapFlags::all();
281 }
282 if (OneUseSum)
283 Sum = Add(Sum, OneUseSum);
284 if (!Sum)
285 return Constant::getNullValue(IdxTy);
286 return Sum;
287}
288
289/// Legal integers and common types are considered desirable. This is used to
290/// avoid creating instructions with types that may not be supported well by
291/// the backend.
292/// NOTE: This treats i8, i16 and i32 specially because they are common
293/// types in frontend languages.
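/// For example (illustrative), with a typical 64-bit DataLayout declaring
/// "n8:16:32:64", i24 is not desirable, while i8, i16, i32 and i64 all are.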
294bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
295 switch (BitWidth) {
296 case 8:
297 case 16:
298 case 32:
299 return true;
300 default:
301 return DL.isLegalInteger(BitWidth);
302 }
303}
304
305/// Return true if it is desirable to convert an integer computation from a
306/// given bit width to a new bit width.
307/// We don't want to convert from a legal or desirable type (like i8) to an
308/// illegal type or from a smaller to a larger illegal type. A width of '1'
309/// is always treated as a desirable type because i1 is a fundamental type in
310/// IR, and there are many specialized optimizations for i1 types.
311/// Common/desirable widths are equally treated as legal to convert to, in
312/// order to open up more combining opportunities.
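/// For example (illustrative, assuming only i8/i16/i32/i64 are legal):
/// converting i33 -> i32 is fine (shrinking to a desirable width), i32 -> i33
/// is rejected (legal type to illegal type), and i128 -> i64 is allowed while
/// i64 -> i128 is not (never widen into an illegal type).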
313bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
314 unsigned ToWidth) const {
315 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
316 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
317
318 // Convert to desirable widths even if they are not legal types.
319 // Only shrink types, to prevent infinite loops.
320 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
321 return true;
322
323 // If this is a legal or desiable integer from type, and the result would be
324 // an illegal type, don't do the transformation.
325 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
326 return false;
327
328 // Otherwise, if both are illegal, do not increase the size of the result. We
329 // do allow things like i160 -> i64, but not i64 -> i160.
330 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
331 return false;
332
333 return true;
334}
335
336/// Return true if it is desirable to convert a computation from 'From' to 'To'.
337/// We don't want to convert from a legal to an illegal type or from a smaller
338/// to a larger illegal type. i1 is always treated as a legal type because it is
339/// a fundamental type in IR, and there are many specialized optimizations for
340/// i1 types.
341bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
342 // TODO: This could be extended to allow vectors. Datalayout changes might be
343 // needed to properly support that.
344 if (!From->isIntegerTy() || !To->isIntegerTy())
345 return false;
346
347 unsigned FromWidth = From->getPrimitiveSizeInBits();
348 unsigned ToWidth = To->getPrimitiveSizeInBits();
349 return shouldChangeType(FromWidth, ToWidth);
350}
351
352// Return true if No Signed Wrap should be maintained for I.
353// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
354// where both B and C should be ConstantInts, results in a constant that does
355// not overflow. This function only handles the Add/Sub/Mul opcodes. For
356// all other opcodes, the function conservatively returns false.
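// For example (illustrative), reassociating "(X +nsw 1) +nsw 2" into
// "X +nsw 3" may keep nsw because 1 + 2 does not overflow, whereas
// "(X +nsw 1) +nsw INT_MAX" could not keep it, since 1 + INT_MAX overflows.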
357static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
358 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
359 if (!OBO || !OBO->hasNoSignedWrap())
360 return false;
361
362 const APInt *BVal, *CVal;
363 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
364 return false;
365
366 // We reason about Add/Sub/Mul Only.
367 bool Overflow = false;
368 switch (I.getOpcode()) {
369 case Instruction::Add:
370 (void)BVal->sadd_ov(*CVal, Overflow);
371 break;
372 case Instruction::Sub:
373 (void)BVal->ssub_ov(*CVal, Overflow);
374 break;
375 case Instruction::Mul:
376 (void)BVal->smul_ov(*CVal, Overflow);
377 break;
378 default:
379 // Conservatively return false for other opcodes.
380 return false;
381 }
382 return !Overflow;
383}
384
385static bool hasNoUnsignedWrap(BinaryOperator &I) {
386 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
387 return OBO && OBO->hasNoUnsignedWrap();
388}
389
390static bool hasNoSignedWrap(BinaryOperator &I) {
391 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
392 return OBO && OBO->hasNoSignedWrap();
393}
394
395/// Conservatively clears subclassOptionalData after a reassociation or
396/// commutation. We preserve fast-math flags when applicable, as they remain
397/// valid across these transforms.
398static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
399 FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
400 if (!FPMO) {
401 I.clearSubclassOptionalData();
402 return;
403 }
404
405 FastMathFlags FMF = I.getFastMathFlags();
406 I.clearSubclassOptionalData();
407 I.setFastMathFlags(FMF);
408}
409
410/// Combine constant operands of associative operations either before or after a
411/// cast to eliminate one of the associative operations:
412/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
413/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
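/// For example (illustrative, zext case only):
///   and (zext i8 (and %x, 3) to i32), 15 --> and (zext i8 %x to i32), 3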
414static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
415 InstCombinerImpl &IC) {
416 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
417 if (!Cast || !Cast->hasOneUse())
418 return false;
419
420 // TODO: Enhance logic for other casts and remove this check.
421 auto CastOpcode = Cast->getOpcode();
422 if (CastOpcode != Instruction::ZExt)
423 return false;
424
425 // TODO: Enhance logic for other BinOps and remove this check.
426 if (!BinOp1->isBitwiseLogicOp())
427 return false;
428
429 auto AssocOpcode = BinOp1->getOpcode();
430 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
431 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
432 return false;
433
434 Constant *C1, *C2;
435 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
436 !match(BinOp2->getOperand(1), m_Constant(C2)))
437 return false;
438
439 // TODO: This assumes a zext cast.
440 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
441 // to the destination type might lose bits.
442
443 // Fold the constants together in the destination type:
444 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
445 const DataLayout &DL = IC.getDataLayout();
446 Type *DestTy = C1->getType();
447 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
448 if (!CastC2)
449 return false;
450 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
451 if (!FoldedC)
452 return false;
453
454 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
455 IC.replaceOperand(*BinOp1, 1, FoldedC);
456 BinOp1->dropPoisonGeneratingFlags();
457 Cast->dropPoisonGeneratingFlags();
458 return true;
459}
460
461// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
462// inttoptr ( ptrtoint (x) ) --> x
463Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
464 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
465 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
466 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
467 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
468 Type *CastTy = IntToPtr->getDestTy();
469 if (PtrToInt &&
470 CastTy->getPointerAddressSpace() ==
471 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
472 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
473 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
474 return PtrToInt->getOperand(0);
475 }
476 return nullptr;
477}
478
479/// This performs a few simplifications for operators that are associative or
480/// commutative:
481///
482/// Commutative operators:
483///
484/// 1. Order operands such that they are listed from right (least complex) to
485/// left (most complex). This puts constants before unary operators before
486/// binary operators.
487///
488/// Associative operators:
489///
490/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
491/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
492///
493/// Associative and commutative operators:
494///
495/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
496/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
497/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
498/// if C1 and C2 are constants.
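///
/// For example (illustrative), transform 2 rewrites "(%x + 5) + -5" as
/// "%x + (5 + -5)" because the inner "5 + -5" simplifies to 0, and transform 6
/// rewrites "(%a + 3) + (%b + 7)" as "(%a + %b) + 10".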
499bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
500 Instruction::BinaryOps Opcode = I.getOpcode();
501 bool Changed = false;
502
503 do {
504 // Order operands such that they are listed from right (least complex) to
505 // left (most complex). This puts constants before unary operators before
506 // binary operators.
507 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
508 getComplexity(I.getOperand(1)))
509 Changed = !I.swapOperands();
510
511 if (I.isCommutative()) {
512 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
513 replaceOperand(I, 0, Pair->first);
514 replaceOperand(I, 1, Pair->second);
515 Changed = true;
516 }
517 }
518
519 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
520 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
521
522 if (I.isAssociative()) {
523 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
524 if (Op0 && Op0->getOpcode() == Opcode) {
525 Value *A = Op0->getOperand(0);
526 Value *B = Op0->getOperand(1);
527 Value *C = I.getOperand(1);
528
529 // Does "B op C" simplify?
530 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
531 // It simplifies to V. Form "A op V".
532 replaceOperand(I, 0, A);
533 replaceOperand(I, 1, V);
534 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
535 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
536
537 // Conservatively clear all optional flags since they may not be
538 // preserved by the reassociation. Reset nsw/nuw based on the above
539 // analysis.
540 ClearSubclassDataAfterReassociation(I);
541
542 // Note: this is only valid because SimplifyBinOp doesn't look at
543 // the operands to Op0.
544 if (IsNUW)
545 I.setHasNoUnsignedWrap(true);
546
547 if (IsNSW)
548 I.setHasNoSignedWrap(true);
549
550 Changed = true;
551 ++NumReassoc;
552 continue;
553 }
554 }
555
556 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
557 if (Op1 && Op1->getOpcode() == Opcode) {
558 Value *A = I.getOperand(0);
559 Value *B = Op1->getOperand(0);
560 Value *C = Op1->getOperand(1);
561
562 // Does "A op B" simplify?
563 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
564 // It simplifies to V. Form "V op C".
565 replaceOperand(I, 0, V);
566 replaceOperand(I, 1, C);
567 // Conservatively clear the optional flags, since they may not be
568 // preserved by the reassociation.
569 ClearSubclassDataAfterReassociation(I);
570 Changed = true;
571 ++NumReassoc;
572 continue;
573 }
574 }
575 }
576
577 if (I.isAssociative() && I.isCommutative()) {
578 if (simplifyAssocCastAssoc(&I, *this)) {
579 Changed = true;
580 ++NumReassoc;
581 continue;
582 }
583
584 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
585 if (Op0 && Op0->getOpcode() == Opcode) {
586 Value *A = Op0->getOperand(0);
587 Value *B = Op0->getOperand(1);
588 Value *C = I.getOperand(1);
589
590 // Does "C op A" simplify?
591 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
592 // It simplifies to V. Form "V op B".
593 replaceOperand(I, 0, V);
594 replaceOperand(I, 1, B);
595 // Conservatively clear the optional flags, since they may not be
596 // preserved by the reassociation.
597 ClearSubclassDataAfterReassociation(I);
598 Changed = true;
599 ++NumReassoc;
600 continue;
601 }
602 }
603
604 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
605 if (Op1 && Op1->getOpcode() == Opcode) {
606 Value *A = I.getOperand(0);
607 Value *B = Op1->getOperand(0);
608 Value *C = Op1->getOperand(1);
609
610 // Does "C op A" simplify?
611 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
612 // It simplifies to V. Form "B op V".
613 replaceOperand(I, 0, B);
614 replaceOperand(I, 1, V);
615 // Conservatively clear the optional flags, since they may not be
616 // preserved by the reassociation.
617 ClearSubclassDataAfterReassociation(I);
618 Changed = true;
619 ++NumReassoc;
620 continue;
621 }
622 }
623
624 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
625 // if C1 and C2 are constants.
626 Value *A, *B;
627 Constant *C1, *C2, *CRes;
628 if (Op0 && Op1 &&
629 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
630 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
631 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
632 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
633 bool IsNUW = hasNoUnsignedWrap(I) &&
634 hasNoUnsignedWrap(*Op0) &&
635 hasNoUnsignedWrap(*Op1);
636 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
637 BinaryOperator::CreateNUW(Opcode, A, B) :
638 BinaryOperator::Create(Opcode, A, B);
639
640 if (isa<FPMathOperator>(NewBO)) {
641 FastMathFlags Flags = I.getFastMathFlags() &
642 Op0->getFastMathFlags() &
643 Op1->getFastMathFlags();
644 NewBO->setFastMathFlags(Flags);
645 }
646 InsertNewInstWith(NewBO, I.getIterator());
647 NewBO->takeName(Op1);
648 replaceOperand(I, 0, NewBO);
649 replaceOperand(I, 1, CRes);
650 // Conservatively clear the optional flags, since they may not be
651 // preserved by the reassociation.
652 ClearSubclassDataAfterReassociation(I);
653 if (IsNUW)
654 I.setHasNoUnsignedWrap(true);
655
656 Changed = true;
657 continue;
658 }
659 }
660
661 // No further simplifications.
662 return Changed;
663 } while (true);
664}
665
666/// Return whether "X LOp (Y ROp Z)" is always equal to
667/// "(X LOp Y) ROp (X LOp Z)".
668static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
669 Instruction::BinaryOps ROp) {
670 // X & (Y | Z) <--> (X & Y) | (X & Z)
671 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
672 if (LOp == Instruction::And)
673 return ROp == Instruction::Or || ROp == Instruction::Xor;
674
675 // X | (Y & Z) <--> (X | Y) & (X | Z)
676 if (LOp == Instruction::Or)
677 return ROp == Instruction::And;
678
679 // X * (Y + Z) <--> (X * Y) + (X * Z)
680 // X * (Y - Z) <--> (X * Y) - (X * Z)
681 if (LOp == Instruction::Mul)
682 return ROp == Instruction::Add || ROp == Instruction::Sub;
683
684 return false;
685}
686
687/// Return whether "(X LOp Y) ROp Z" is always equal to
688/// "(X ROp Z) LOp (Y ROp Z)".
689static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
690 Instruction::BinaryOps ROp) {
691 if (Instruction::isCommutative(ROp))
692 return leftDistributesOverRight(ROp, LOp);
693
694 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
695 return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
696
697 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
698 // but this requires knowing that the addition does not overflow and other
699 // such subtleties.
700}
701
702/// This function returns identity value for given opcode, which can be used to
703/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
704static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
705 if (isa<Constant>(V))
706 return nullptr;
707
708 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
709}
710
711/// This function predicates factorization using distributive laws. By default,
712/// it just returns the 'Op' inputs. But for special-cases like
713/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
714/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
715/// allow more factorization opportunities.
716static Instruction::BinaryOps
717getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
718 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
719 assert(Op && "Expected a binary operator");
720 LHS = Op->getOperand(0);
721 RHS = Op->getOperand(1);
722 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
723 Constant *C;
724 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
725 // X << C --> X * (1 << C)
726 RHS = ConstantFoldBinaryInstruction(
727 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
728 assert(RHS && "Constant folding of immediate constants failed");
729 return Instruction::Mul;
730 }
731 // TODO: We can add other conversions e.g. shr => div etc.
732 }
733 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
734 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
736 // lshr nneg C, X --> ashr nneg C, X
737 return Instruction::AShr;
738 }
739 }
740 return Op->getOpcode();
741}
742
743/// This tries to simplify binary operations by factorizing out common terms
744/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
745static Value *tryFactorization(BinaryOperator &I, SimplifyQuery SQ,
746 InstCombiner::BuilderTy &Builder,
747 Instruction::BinaryOps InnerOpcode, Value *A,
748 Value *B, Value *C, Value *D) {
749 assert(A && B && C && D && "All values must be provided");
750
751 Value *V = nullptr;
752 Value *RetVal = nullptr;
753 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
754 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
755
756 // Does "X op' Y" always equal "Y op' X"?
757 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
758
759 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
760 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
761 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
762 // commutative case, "(A op' B) op (C op' A)"?
763 if (A == C || (InnerCommutative && A == D)) {
764 if (A != C)
765 std::swap(C, D);
766 // Consider forming "A op' (B op D)".
767 // If "B op D" simplifies then it can be formed with no cost.
768 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
769
770 // If "B op D" doesn't simplify then only go on if one of the existing
771 // operations "A op' B" and "C op' D" will be zapped as no longer used.
772 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
773 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
774 if (V)
775 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
776 }
777 }
778
779 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
780 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
781 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
782 // commutative case, "(A op' B) op (B op' D)"?
783 if (B == D || (InnerCommutative && B == C)) {
784 if (B != D)
785 std::swap(C, D);
786 // Consider forming "(A op C) op' B".
787 // If "A op C" simplifies then it can be formed with no cost.
788 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
789
790 // If "A op C" doesn't simplify then only go on if one of the existing
791 // operations "A op' B" and "C op' D" will be zapped as no longer used.
792 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
793 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
794 if (V)
795 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
796 }
797 }
798
799 if (!RetVal)
800 return nullptr;
801
802 ++NumFactor;
803 RetVal->takeName(&I);
804
805 // Try to add no-overflow flags to the final value.
806 if (isa<BinaryOperator>(RetVal)) {
807 bool HasNSW = false;
808 bool HasNUW = false;
809 if (isa<OverflowingBinaryOperator>(&I)) {
810 HasNSW = I.hasNoSignedWrap();
811 HasNUW = I.hasNoUnsignedWrap();
812 }
813 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
814 HasNSW &= LOBO->hasNoSignedWrap();
815 HasNUW &= LOBO->hasNoUnsignedWrap();
816 }
817
818 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
819 HasNSW &= ROBO->hasNoSignedWrap();
820 HasNUW &= ROBO->hasNoUnsignedWrap();
821 }
822
823 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
824 // We can propagate 'nsw' if we know that
825 // %Y = mul nsw i16 %X, C
826 // %Z = add nsw i16 %Y, %X
827 // =>
828 // %Z = mul nsw i16 %X, C+1
829 //
830 // iff C+1 isn't INT_MIN
831 const APInt *CInt;
832 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
833 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
834
835 // nuw can be propagated with any constant or nuw value.
836 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
837 }
838 }
839 return RetVal;
840}
841
842// If `I` has one Const operand and the other matches `(ctpop (not x))`,
843// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
844// This is only useful if the new subtract can fold, so we only handle the
845// following cases:
846// 1) (add/sub/disjoint_or C, (ctpop (not x))
847// -> (add/sub/disjoint_or C', (ctpop x))
848// 2) (cmp pred C, (ctpop (not x))
849// -> (cmp pred C', (ctpop x))
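//
// For example (illustrative), with i8 operands:
//   (add i8 5, (ctpop (xor %x, -1))) -> (sub i8 13, (ctpop %x))
// since ctpop(~x) == 8 - ctpop(x), so 5 + ctpop(~x) == (5 + 8) - ctpop(x).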
850Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
851 unsigned Opc = I->getOpcode();
852 unsigned ConstIdx = 1;
853 switch (Opc) {
854 default:
855 return nullptr;
856 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
857 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
858 // is constant.
859 case Instruction::Sub:
860 ConstIdx = 0;
861 break;
862 case Instruction::ICmp:
863 // Signed predicates aren't correct in some edge cases like for i2 types.
864 // Also, since (ctpop x) is known to lie in [0, BitWidth(x)], almost all
865 // signed comparisons against it are simplified to unsigned ones anyway.
866 if (cast<ICmpInst>(I)->isSigned())
867 return nullptr;
868 break;
869 case Instruction::Or:
870 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
871 return nullptr;
872 [[fallthrough]];
873 case Instruction::Add:
874 break;
875 }
876
877 Value *Op;
878 // Find ctpop.
879 if (!match(I->getOperand(1 - ConstIdx),
880 m_OneUse(m_Intrinsic<Intrinsic::ctpop>(m_Value(Op)))))
881 return nullptr;
882
883 Constant *C;
884 // Check other operand is ImmConstant.
885 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
886 return nullptr;
887
888 Type *Ty = Op->getType();
889 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
890 // Need extra check for icmp. Note if this check is true, it generally means
891 // the icmp will simplify to true/false.
892 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
893 Constant *Cmp =
895 if (!Cmp || !Cmp->isZeroValue())
896 return nullptr;
897 }
898
899 // Check we can invert `(not x)` for free.
900 bool Consumes = false;
901 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
902 return nullptr;
903 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
904 assert(NotOp != nullptr &&
905 "Desync between isFreeToInvert and getFreelyInverted");
906
907 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
908
909 Value *R = nullptr;
910
911 // Do the transformation here to avoid potentially introducing an infinite
912 // loop.
913 switch (Opc) {
914 case Instruction::Sub:
915 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
916 break;
917 case Instruction::Or:
918 case Instruction::Add:
919 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
920 break;
921 case Instruction::ICmp:
922 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
923 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
924 break;
925 default:
926 llvm_unreachable("Unhandled Opcode");
927 }
928 assert(R != nullptr);
929 return replaceInstUsesWith(*I, R);
930}
931
932// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
933// IFF
934// 1) the logic_shifts match
935// 2) either both binops are binops and one is `and` or
936// BinOp1 is `and`
937// (logic_shift (inv_logic_shift C1, C), C) == C1 or
938//
939// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
940//
941// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
942// IFF
943// 1) the logic_shifts match
944// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
945//
946// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
947//
948// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
949// IFF
950// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
951// 2) Binop2 is `not`
952//
953// -> (arithmetic_shift Binop1((not X), Y), Amt)
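//
// For example (illustrative), with the first pattern and i8 operands:
//   (and (xor (lshr %x, 4), 15), (lshr %y, 4))
//     -> (lshr (and (xor %x, -16), %y), 4)
// because shifting the mask back up (15 << 4 == 0xF0 == -16) lets the whole
// expression share a single lshr.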
954
955Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
956 const DataLayout &DL = I.getDataLayout();
957 auto IsValidBinOpc = [](unsigned Opc) {
958 switch (Opc) {
959 default:
960 return false;
961 case Instruction::And:
962 case Instruction::Or:
963 case Instruction::Xor:
964 case Instruction::Add:
965 // Skip Sub as we only match constant masks which will canonicalize to use
966 // add.
967 return true;
968 }
969 };
970
971 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
972 // constraints.
973 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
974 unsigned ShOpc) {
975 assert(ShOpc != Instruction::AShr);
976 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
977 ShOpc == Instruction::Shl;
978 };
979
980 auto GetInvShift = [](unsigned ShOpc) {
981 assert(ShOpc != Instruction::AShr);
982 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
983 };
984
985 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
986 unsigned ShOpc, Constant *CMask,
987 Constant *CShift) {
988 // If the BinOp1 is `and` we don't need to check the mask.
989 if (BinOpc1 == Instruction::And)
990 return true;
991
992 // For all other possible transfers we need complete distributable
993 // binop/shift (anything but `add` + `lshr`).
994 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
995 return false;
996
997 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
998 // vecs, otherwise the mask will be simplified and the following check will
999 // handle it).
1000 if (BinOpc2 == Instruction::And)
1001 return true;
1002
1003 // Otherwise, need mask that meets the below requirement.
1004 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1005 Constant *MaskInvShift =
1006 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1007 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1008 CMask;
1009 };
1010
1011 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1012 Constant *CMask, *CShift;
1013 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1014 if (!match(I.getOperand(ShOpnum),
1015 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1016 return nullptr;
1017 if (!match(I.getOperand(1 - ShOpnum),
1018 m_OneUse(m_BinOp(m_CombineAnd(
1019 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1020 m_Value(ShiftedX)),
1021 m_Value(Mask))))
1022 return nullptr;
1023 // Make sure we are matching instruction shifts and not ConstantExpr
1024 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1025 auto *IX = dyn_cast<Instruction>(ShiftedX);
1026 if (!IY || !IX)
1027 return nullptr;
1028
1029 // LHS and RHS need same shift opcode
1030 unsigned ShOpc = IY->getOpcode();
1031 if (ShOpc != IX->getOpcode())
1032 return nullptr;
1033
1034 // Make sure binop is real instruction and not ConstantExpr
1035 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1036 if (!BO2)
1037 return nullptr;
1038
1039 unsigned BinOpc = BO2->getOpcode();
1040 // Make sure we have valid binops.
1041 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1042 return nullptr;
1043
1044 if (ShOpc == Instruction::AShr) {
1045 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1046 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1047 Value *NotX = Builder.CreateNot(X);
1048 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1049 return BinaryOperator::Create(
1050 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1051 }
1052
1053 return nullptr;
1054 }
1055
1056 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1057 // distribute to drop the shift irrelevant of constants.
1058 if (BinOpc == I.getOpcode() &&
1059 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1060 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1061 Value *NewBinOp1 = Builder.CreateBinOp(
1062 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1063 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1064 }
1065
1066 // Otherwise we can only distribute by constant shifting the mask, so
1067 // ensure we have constants.
1068 if (!match(Shift, m_ImmConstant(CShift)))
1069 return nullptr;
1070 if (!match(Mask, m_ImmConstant(CMask)))
1071 return nullptr;
1072
1073 // Check if we can distribute the binops.
1074 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1075 return nullptr;
1076
1077 Constant *NewCMask =
1078 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1079 Value *NewBinOp2 = Builder.CreateBinOp(
1080 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1081 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1082 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1083 NewBinOp1, CShift);
1084 };
1085
1086 if (Instruction *R = MatchBinOp(0))
1087 return R;
1088 return MatchBinOp(1);
1089}
1090
1091// (Binop (zext C), (select C, T, F))
1092// -> (select C, (binop 1, T), (binop 0, F))
1093//
1094// (Binop (sext C), (select C, T, F))
1095// -> (select C, (binop -1, T), (binop 0, F))
1096//
1097// Attempt to simplify binary operations into a select with folded args, when
1098// one operand of the binop is a select instruction and the other operand is a
1099// zext/sext extension, whose value is the select condition.
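//
// For example (illustrative):
//   add (zext i1 %c to i32), (select i1 %c, i32 10, i32 20)
//     -> select i1 %c, i32 11, i32 20
// because on the true arm the zext contributes 1 and on the false arm it
// contributes 0.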
1100static Instruction *foldBinOpOfSelectAndCastOfSelectCondition(
1101 BinaryOperator &I, InstCombiner::BuilderTy &Builder) {
1102 // TODO: this simplification may be extended to any speculatable instruction,
1103 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1104 Instruction::BinaryOps Opc = I.getOpcode();
1105 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1106 Value *A, *CondVal, *TrueVal, *FalseVal;
1107 Value *CastOp;
1108
1109 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1110 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1111 A->getType()->getScalarSizeInBits() == 1 &&
1112 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1113 m_Value(FalseVal)));
1114 };
1115
1116 // Make sure one side of the binop is a select instruction, and the other is a
1117 // zero/sign extension operating on an i1.
1118 if (MatchSelectAndCast(LHS, RHS))
1119 CastOp = LHS;
1120 else if (MatchSelectAndCast(RHS, LHS))
1121 CastOp = RHS;
1122 else
1123 return nullptr;
1124
1125 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1126 bool IsCastOpRHS = (CastOp == RHS);
1127 bool IsZExt = isa<ZExtInst>(CastOp);
1128 Constant *C;
1129
1130 if (IsTrueArm) {
1131 C = Constant::getNullValue(V->getType());
1132 } else if (IsZExt) {
1133 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1134 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1135 } else {
1136 C = Constant::getAllOnesValue(V->getType());
1137 }
1138
1139 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1140 : Builder.CreateBinOp(Opc, C, V);
1141 };
1142
1143 // If the value used in the zext/sext is the select condition, or the negated
1144 // of the select condition, the binop can be simplified.
1145 if (CondVal == A) {
1146 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1147 return SelectInst::Create(CondVal, NewTrueVal,
1148 NewFoldedConst(true, FalseVal));
1149 }
1150
1151 if (match(A, m_Not(m_Specific(CondVal)))) {
1152 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1153 return SelectInst::Create(CondVal, NewTrueVal,
1154 NewFoldedConst(false, FalseVal));
1155 }
1156
1157 return nullptr;
1158}
1159
1160Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
1161 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1162 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1163 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1164 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1165 Value *A, *B, *C, *D;
1166 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1167
1168 if (Op0)
1169 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1170 if (Op1)
1171 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1172
1173 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1174 // a common term.
1175 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1176 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1177 return V;
1178
1179 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1180 // term.
1181 if (Op0)
1182 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1183 if (Value *V =
1184 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1185 return V;
1186
1187 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1188 // term.
1189 if (Op1)
1190 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1191 if (Value *V =
1192 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1193 return V;
1194
1195 return nullptr;
1196}
1197
1198/// This tries to simplify binary operations which some other binary operation
1199/// distributes over either by factorizing out common terms
1200/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1201/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1202/// Returns the simplified value, or null if it didn't simplify.
1203Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
1204 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1205 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1206 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1207 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1208
1209 // Factorization.
1210 if (Value *R = tryFactorizationFolds(I))
1211 return R;
1212
1213 // Expansion.
1214 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1215 // The instruction has the form "(A op' B) op C". See if expanding it out
1216 // to "(A op C) op' (B op C)" results in simplifications.
1217 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1218 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1219
1220 // Disable the use of undef because it's not safe to distribute undef.
1221 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1222 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1223 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1224
1225 // Do "A op C" and "B op C" both simplify?
1226 if (L && R) {
1227 // They do! Return "L op' R".
1228 ++NumExpand;
1229 C = Builder.CreateBinOp(InnerOpcode, L, R);
1230 C->takeName(&I);
1231 return C;
1232 }
1233
1234 // Does "A op C" simplify to the identity value for the inner opcode?
1235 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1236 // They do! Return "B op C".
1237 ++NumExpand;
1238 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1239 C->takeName(&I);
1240 return C;
1241 }
1242
1243 // Does "B op C" simplify to the identity value for the inner opcode?
1244 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1245 // They do! Return "A op C".
1246 ++NumExpand;
1247 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1248 C->takeName(&I);
1249 return C;
1250 }
1251 }
1252
1253 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1254 // The instruction has the form "A op (B op' C)". See if expanding it out
1255 // to "(A op B) op' (A op C)" results in simplifications.
1256 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1257 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1258
1259 // Disable the use of undef because it's not safe to distribute undef.
1260 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1261 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1262 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1263
1264 // Do "A op B" and "A op C" both simplify?
1265 if (L && R) {
1266 // They do! Return "L op' R".
1267 ++NumExpand;
1268 A = Builder.CreateBinOp(InnerOpcode, L, R);
1269 A->takeName(&I);
1270 return A;
1271 }
1272
1273 // Does "A op B" simplify to the identity value for the inner opcode?
1274 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1275 // They do! Return "A op C".
1276 ++NumExpand;
1277 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1278 A->takeName(&I);
1279 return A;
1280 }
1281
1282 // Does "A op C" simplify to the identity value for the inner opcode?
1283 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1284 // They do! Return "A op B".
1285 ++NumExpand;
1286 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1287 A->takeName(&I);
1288 return A;
1289 }
1290 }
1291
1292 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1293}
1294
1295static std::optional<std::pair<Value *, Value *>>
1296matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
1297 if (LHS->getParent() != RHS->getParent())
1298 return std::nullopt;
1299
1300 if (LHS->getNumIncomingValues() < 2)
1301 return std::nullopt;
1302
1303 if (!equal(LHS->blocks(), RHS->blocks()))
1304 return std::nullopt;
1305
1306 Value *L0 = LHS->getIncomingValue(0);
1307 Value *R0 = RHS->getIncomingValue(0);
1308
1309 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1310 Value *L1 = LHS->getIncomingValue(I);
1311 Value *R1 = RHS->getIncomingValue(I);
1312
1313 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1314 continue;
1315
1316 return std::nullopt;
1317 }
1318
1319 return std::optional(std::pair(L0, R0));
1320}
1321
1322std::optional<std::pair<Value *, Value *>>
1323InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1324 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1325 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1326 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1327 return std::nullopt;
1328 switch (LHSInst->getOpcode()) {
1329 case Instruction::PHI:
1330 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1331 case Instruction::Select: {
1332 Value *Cond = LHSInst->getOperand(0);
1333 Value *TrueVal = LHSInst->getOperand(1);
1334 Value *FalseVal = LHSInst->getOperand(2);
1335 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1336 FalseVal == RHSInst->getOperand(1))
1337 return std::pair(TrueVal, FalseVal);
1338 return std::nullopt;
1339 }
1340 case Instruction::Call: {
1341 // Match min(a, b) and max(a, b)
1342 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1343 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1344 if (LHSMinMax && RHSMinMax &&
1345 LHSMinMax->getPredicate() ==
1347 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1348 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1349 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1350 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1351 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1352 return std::nullopt;
1353 }
1354 default:
1355 return std::nullopt;
1356 }
1357}
1358
1359Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1360 Value *LHS,
1361 Value *RHS) {
1362 Value *A, *B, *C, *D, *E, *F;
1363 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1364 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1365 if (!LHSIsSelect && !RHSIsSelect)
1366 return nullptr;
1367
1368 SelectInst *SI = LHSIsSelect && RHSIsSelect
1369 ? nullptr
1370 : cast<SelectInst>(LHSIsSelect ? LHS : RHS);
1371
1372 FastMathFlags FMF;
1373 BuilderTy::FastMathFlagGuard Guard(Builder);
1374 if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
1375 FMF = FPOp->getFastMathFlags();
1376 Builder.setFastMathFlags(FMF);
1377 }
1378
1379 Instruction::BinaryOps Opcode = I.getOpcode();
1380 SimplifyQuery Q = SQ.getWithInstruction(&I);
1381
1382 Value *Cond, *True = nullptr, *False = nullptr;
1383
1384 // Special-case for add/negate combination. Replace the zero in the negation
1385 // with the trailing add operand:
1386 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1387 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1388 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1389 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1390 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1391 return nullptr;
1392 Value *N;
1393 if (True && match(FVal, m_Neg(m_Value(N)))) {
1394 Value *Sub = Builder.CreateSub(Z, N);
1395 return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
1396 }
1397 if (False && match(TVal, m_Neg(m_Value(N)))) {
1398 Value *Sub = Builder.CreateSub(Z, N);
1399 return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
1400 }
1401 return nullptr;
1402 };
1403
1404 if (LHSIsSelect && RHSIsSelect && A == D) {
1405 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1406 Cond = A;
1407 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1408 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1409
1410 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1411 if (False && !True)
1412 True = Builder.CreateBinOp(Opcode, B, E);
1413 else if (True && !False)
1414 False = Builder.CreateBinOp(Opcode, C, F);
1415 }
1416 } else if (LHSIsSelect && LHS->hasOneUse()) {
1417 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1418 Cond = A;
1419 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1420 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1421 if (Value *NewSel = foldAddNegate(B, C, RHS))
1422 return NewSel;
1423 } else if (RHSIsSelect && RHS->hasOneUse()) {
1424 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1425 Cond = D;
1426 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1427 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1428 if (Value *NewSel = foldAddNegate(E, F, LHS))
1429 return NewSel;
1430 }
1431
1432 if (!True || !False)
1433 return nullptr;
1434
1435 Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
1436 NewSI->takeName(&I);
1437 return NewSI;
1438}
1439
1440/// Freely adapt every user of V as-if V was changed to !V.
1441/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
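/// For example (illustrative), if V feeds only the condition of a select and
/// of a branch, the select swaps its true/false arms and the branch swaps its
/// successors, after which every use reads as if it had been given !V.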
1442void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {
1443 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1444 for (User *U : make_early_inc_range(I->users())) {
1445 if (U == IgnoredUser)
1446 continue; // Don't consider this user.
1447 switch (cast<Instruction>(U)->getOpcode()) {
1448 case Instruction::Select: {
1449 auto *SI = cast<SelectInst>(U);
1450 SI->swapValues();
1451 SI->swapProfMetadata();
1452 break;
1453 }
1454 case Instruction::Br: {
1455 auto *BI = cast<BranchInst>(U);
1456 BI->swapSuccessors(); // swaps prof metadata too
1457 if (BPI)
1458 BPI->swapSuccEdgesProbabilities(BI->getParent());
1459 break;
1460 }
1461 case Instruction::Xor:
1462 replaceInstUsesWith(cast<Instruction>(*U), I);
1463 // Add to worklist for DCE.
1464 addToWorklist(cast<Instruction>(U));
1465 break;
1466 default:
1467 llvm_unreachable("Got unexpected user - out of sync with "
1468 "canFreelyInvertAllUsersOf() ?");
1469 }
1470 }
1471
1472 // Update pre-existing debug value uses.
1473 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1474 llvm::findDbgValues(I, DbgVariableRecords);
1475
1476 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1477 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1478 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1479 Idx != End; ++Idx)
1480 if (DbgVal->getVariableLocationOp(Idx) == I)
1481 DbgVal->setExpression(
1482 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1483 }
1484}
1485
1486/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1487/// constant zero (which is the 'negate' form).
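/// For example (illustrative), for "%n = sub i32 0, %x" this returns %x, and
/// for a foldable constant such as i32 -5 it returns the constant i32 5.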
1488Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1489 Value *NegV;
1490 if (match(V, m_Neg(m_Value(NegV))))
1491 return NegV;
1492
1493 // Constants can be considered to be negated values if they can be folded.
1494 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1495 return ConstantExpr::getNeg(C);
1496
1497 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1498 if (C->getType()->getElementType()->isIntegerTy())
1499 return ConstantExpr::getNeg(C);
1500
1501 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1502 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1503 Constant *Elt = CV->getAggregateElement(i);
1504 if (!Elt)
1505 return nullptr;
1506
1507 if (isa<UndefValue>(Elt))
1508 continue;
1509
1510 if (!isa<ConstantInt>(Elt))
1511 return nullptr;
1512 }
1513 return ConstantExpr::getNeg(CV);
1514 }
1515
1516 // Negate integer vector splats.
1517 if (auto *CV = dyn_cast<Constant>(V))
1518 if (CV->getType()->isVectorTy() &&
1519 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1520 return ConstantExpr::getNeg(CV);
1521
1522 return nullptr;
1523}
1524
1525// Try to fold:
1526// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1527// -> ({s|u}itofp (int_binop x, y))
1528// 2) (fp_binop ({s|u}itofp x), FpC)
1529// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1530//
1531// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1532Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1533 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1534 Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {
1535
1536 Type *FPTy = BO.getType();
1537 Type *IntTy = IntOps[0]->getType();
1538
1539 unsigned IntSz = IntTy->getScalarSizeInBits();
1540 // This is the maximum number of in-use bits in the integer for which the
1541 // int -> fp casts are exact.
1542 unsigned MaxRepresentableBits =
1543 APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
1544
1545 // Preserve the known number of leading bits. This can allow us to trivially
1546 // pass the nsw/nuw checks later on.
1547 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1548
1549 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1550 // cache if between calls to `foldFBinOpOfIntCastsFromSign`.
1551 auto IsNonZero = [&](unsigned OpNo) -> bool {
1552 if (OpsKnown[OpNo].hasKnownBits() &&
1553 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1554 return true;
1555 return isKnownNonZero(IntOps[OpNo], SQ);
1556 };
1557
1558 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1559 // NB: This matches the impl in ValueTracking, we just try to use cached
1560 // knownbits here. If we ever start supporting WithCache for
1561 // `isKnownNonNegative`, change this to an explicit call.
1562 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1563 };
1564
1565 // Check if we know for certain that ({s|u}itofp op) is exact.
1566 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1567 // Can we treat this operand as the desired sign?
1568 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1569 !IsNonNeg(OpNo))
1570 return false;
1571
1572 // If fp precision >= bitwidth(op) then it's exact.
1573 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1574 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1575 // handled specially. We can't, however, increase the bound arbitrarily for
1576 // `sitofp` as for larger sizes, it won't sign extend.
1577 if (MaxRepresentableBits < IntSz) {
1578 // Otherwise if its signed cast check that fp precisions >= bitwidth(op) -
1579 // numSignBits(op).
1580 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1581 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1582 if (OpsFromSigned)
1583 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1584 // Finally for unsigned check that fp precision >= bitwidth(op) -
1585 // numLeadingZeros(op).
1586 else {
1587 NumUsedLeadingBits[OpNo] =
1588 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1589 }
1590 }
1591 // NB: We could also check if op is known to be a power of 2 or zero (which
1592 // will always be representable). It's unlikely, however, that if we are
1593 // unable to bound op in any way we will be able to pass the overflow checks
1594 // later on.
1595
1596 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1597 return false;
1598 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1599 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1600 IsNonZero(OpNo);
1601 };
1602
1603 // If we have a constant rhs, see if we can losslessly convert it to an int.
1604 if (Op1FpC != nullptr) {
1605 // Signed + Mul req non-zero
1606 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1607 !match(Op1FpC, m_NonZeroFP()))
1608 return nullptr;
1609
1610 Constant *Op1IntC = ConstantFoldCastOperand(
1611 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1612 IntTy, DL);
1613 if (Op1IntC == nullptr)
1614 return nullptr;
1615 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1616 : Instruction::UIToFP,
1617 Op1IntC, FPTy, DL) != Op1FpC)
1618 return nullptr;
1619
1620 // First try to keep sign of cast the same.
1621 IntOps[1] = Op1IntC;
1622 }
1623
1624 // Ensure lhs/rhs integer types match.
1625 if (IntTy != IntOps[1]->getType())
1626 return nullptr;
1627
1628 if (Op1FpC == nullptr) {
1629 if (!IsValidPromotion(1))
1630 return nullptr;
1631 }
1632 if (!IsValidPromotion(0))
1633 return nullptr;
1634
1635 // Finally we check if the integer version of the binop will not overflow.
1636 Instruction::BinaryOps IntOpc;
1637 // Because of the precision check, we can often rule out overflows.
1638 bool NeedsOverflowCheck = true;
1639 // Try to conservatively rule out overflow based on the already done precision
1640 // checks.
1641 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1642 unsigned OverflowMaxCurBits =
1643 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1644 bool OutputSigned = OpsFromSigned;
1645 switch (BO.getOpcode()) {
1646 case Instruction::FAdd:
1647 IntOpc = Instruction::Add;
1648 OverflowMaxOutputBits += OverflowMaxCurBits;
1649 break;
1650 case Instruction::FSub:
1651 IntOpc = Instruction::Sub;
1652 OverflowMaxOutputBits += OverflowMaxCurBits;
1653 break;
1654 case Instruction::FMul:
1655 IntOpc = Instruction::Mul;
1656 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1657 break;
1658 default:
1659 llvm_unreachable("Unsupported binop");
1660 }
1661 // The precision check may have already ruled out overflow.
1662 if (OverflowMaxOutputBits < IntSz) {
1663 NeedsOverflowCheck = false;
1664 // We can bound unsigned overflow from sub to in range signed value (this is
1665 // what allows us to avoid the overflow check for sub).
1666 if (IntOpc == Instruction::Sub)
1667 OutputSigned = true;
1668 }
1669
1670 // Precision check did not rule out overflow, so need to check.
1671 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1672 // `IntOps[...]` arguments to `KnownOps[...]`.
1673 if (NeedsOverflowCheck &&
1674 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1675 return nullptr;
1676
1677 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1678 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1679 IntBO->setHasNoSignedWrap(OutputSigned);
1680 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1681 }
1682 if (OutputSigned)
1683 return new SIToFPInst(IntBinOp, FPTy);
1684 return new UIToFPInst(IntBinOp, FPTy);
1685}
1686
1687// Try to fold:
1688// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1689// -> ({s|u}itofp (int_binop x, y))
1690// 2) (fp_binop ({s|u}itofp x), FpC)
1691// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
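// For example (illustrative, assuming the integer add is proven not to
// overflow):
//   (fadd (sitofp i8 %x to float), (sitofp i8 %y to float))
//     --> (sitofp (add nsw i8 %x, %y) to float)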
1692Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1693 // Don't perform the fold on vectors, as the integer operation may be much
1694 // more expensive than the float operation in that case.
1695 if (BO.getType()->isVectorTy())
1696 return nullptr;
1697
1698 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1699 Constant *Op1FpC = nullptr;
1700 // Check for:
1701 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1702 // 2) (binop ({s|u}itofp x), FpC)
1703 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1704 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1705 return nullptr;
1706
1707 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1708 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1709 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1710 return nullptr;
1711
1712 // Cache KnownBits a bit to potentially save some analysis.
1713 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1714
1715 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1716 // different constraints depending on the sign of the cast.
1717 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1718 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1719 IntOps, Op1FpC, OpsKnown))
1720 return R;
1721 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1722 Op1FpC, OpsKnown);
1723}
1724
1725/// A binop with a constant operand and a sign-extended boolean operand may be
1726/// converted into a select of constants by applying the binary operation to
1727/// the constant with the two possible values of the extended boolean (0 or -1).
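/// For example (illustrative): mul (sext i1 %b to i32), 42
///   --> select i1 %b, i32 -42, i32 0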
1728Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1729 // TODO: Handle non-commutative binop (constant is operand 0).
1730 // TODO: Handle zext.
1731 // TODO: Peek through 'not' of cast.
1732 Value *BO0 = BO.getOperand(0);
1733 Value *BO1 = BO.getOperand(1);
1734 Value *X;
1735 Constant *C;
1736 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1737 !X->getType()->isIntOrIntVectorTy(1))
1738 return nullptr;
1739
1740 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1741   Constant *Ones = Constant::getAllOnesValue(BO.getType());
1742   Constant *Zero = Constant::getNullValue(BO.getType());
1743   Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1744 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1745 return createSelectInstWithUnknownProfile(X, TVal, FVal);
1746}
1747
1749 bool IsTrueArm) {
1751 for (Value *Op : I.operands()) {
1752 Value *V = nullptr;
1753 if (Op == SI) {
1754 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1755 } else if (match(SI->getCondition(),
1758 m_Specific(Op), m_Value(V))) &&
1760 // Pass
1761 } else if (match(Op, m_ZExt(m_Specific(SI->getCondition())))) {
1762 V = IsTrueArm ? ConstantInt::get(Op->getType(), 1)
1763 : ConstantInt::getNullValue(Op->getType());
1764 } else {
1765 V = Op;
1766 }
1767 Ops.push_back(V);
1768 }
1769
1770 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1771}
1772
1774 Value *NewOp, InstCombiner &IC) {
1775 Instruction *Clone = I.clone();
1776 Clone->replaceUsesOfWith(SI, NewOp);
1778 IC.InsertNewInstBefore(Clone, I.getIterator());
1779 return Clone;
1780}
1781
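// Illustrative example (not part of the original source): with both select
// arms foldable,
//   add (select i1 %c, i32 0, i32 8), 4 --> select i1 %c, i32 4, i32 12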
1783 bool FoldWithMultiUse,
1784 bool SimplifyBothArms) {
1785   // Don't modify shared select instructions unless FoldWithMultiUse is set.
1786 if (!SI->hasOneUser() && !FoldWithMultiUse)
1787 return nullptr;
1788
1789 Value *TV = SI->getTrueValue();
1790 Value *FV = SI->getFalseValue();
1791
1792 // Bool selects with constant operands can be folded to logical ops.
1793 if (SI->getType()->isIntOrIntVectorTy(1))
1794 return nullptr;
1795
1796   // Avoid breaking the min/max reduction pattern,
1797   // which is needed for vectorization later.
1799 for (Value *IntrinOp : Op.operands())
1800 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1801 for (Value *PhiOp : PN->operands())
1802 if (PhiOp == &Op)
1803 return nullptr;
1804
1805 // Test if a FCmpInst instruction is used exclusively by a select as
1806 // part of a minimum or maximum operation. If so, refrain from doing
1807 // any other folding. This helps out other analyses which understand
1808 // non-obfuscated minimum and maximum idioms. And in this case, at
1809 // least one of the comparison operands has at least one user besides
1810 // the compare (the select), which would often largely negate the
1811 // benefit of folding anyway.
1812 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1813 if (CI->hasOneUse()) {
1814 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1815 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1816 !CI->isCommutative())
1817 return nullptr;
1818 }
1819 }
1820
1821 // Make sure that one of the select arms folds successfully.
1822 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1823 Value *NewFV =
1824 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1825 if (!NewTV && !NewFV)
1826 return nullptr;
1827
1828 if (SimplifyBothArms && !(NewTV && NewFV))
1829 return nullptr;
1830
1831 // Create an instruction for the arm that did not fold.
1832 if (!NewTV)
1833 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1834 if (!NewFV)
1835 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1836 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1837}
1838
1840 Value *InValue, BasicBlock *InBB,
1841 const DataLayout &DL,
1842 const SimplifyQuery SQ) {
1843 // NB: It is a precondition of this transform that the operands be
1844 // phi translatable!
1846 for (Value *Op : I.operands()) {
1847 if (Op == PN)
1848 Ops.push_back(InValue);
1849 else
1850 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1851 }
1852
1853 // Don't consider the simplification successful if we get back a constant
1854 // expression. That's just an instruction in hiding.
1855 // Also reject the case where we simplify back to the phi node. We wouldn't
1856 // be able to remove it in that case.
1858 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1859 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1860 return NewVal;
1861
1862 // Check if incoming PHI value can be replaced with constant
1863 // based on implied condition.
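  // For example (illustrative): if the predecessor branches on
  // (icmp slt %x, %y) and this block is the taken successor, then an
  // (icmp sle %x, %y) over the phi-translated operands folds to 'true'.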
1864 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1865 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1866 if (TerminatorBI && TerminatorBI->isConditional() &&
1867 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1868 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1869 std::optional<bool> ImpliedCond = isImpliedCondition(
1870 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1871 DL, LHSIsTrue);
1872 if (ImpliedCond)
1873 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1874 }
1875
1876 return nullptr;
1877}
1878
1879/// In some cases it is beneficial to fold a select into a binary operator.
1880/// For example:
1881/// %1 = or %in, 4
1882/// %2 = select %cond, %1, %in
1883/// %3 = or %2, 1
1884/// =>
1885/// %1 = select i1 %cond, 5, 1
1886/// %2 = or %1, %in
1888 assert(Op.isAssociative() && "The operation must be associative!");
1889
1890 SelectInst *SI = dyn_cast<SelectInst>(Op.getOperand(0));
1891
1892 Constant *Const;
1893 if (!SI || !match(Op.getOperand(1), m_ImmConstant(Const)) ||
1894 !Op.hasOneUse() || !SI->hasOneUse())
1895 return nullptr;
1896
1897 Value *TV = SI->getTrueValue();
1898 Value *FV = SI->getFalseValue();
1899 Value *Input, *NewTV, *NewFV;
1900 Constant *Const2;
1901
1902 if (TV->hasOneUse() && match(TV, m_BinOp(Op.getOpcode(), m_Specific(FV),
1903 m_ImmConstant(Const2)))) {
1904 NewTV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1905 NewFV = Const;
1906 Input = FV;
1907 } else if (FV->hasOneUse() &&
1908 match(FV, m_BinOp(Op.getOpcode(), m_Specific(TV),
1909 m_ImmConstant(Const2)))) {
1910 NewTV = Const;
1911 NewFV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1912 Input = TV;
1913 } else
1914 return nullptr;
1915
1916 if (!NewTV || !NewFV)
1917 return nullptr;
1918
1919 Value *NewSI = Builder.CreateSelect(SI->getCondition(), NewTV, NewFV);
1920 return BinaryOperator::Create(Op.getOpcode(), NewSI, Input);
1921}
1922
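// Illustrative example (not part of the original source): when every incoming
// value simplifies, the user is folded into the phi, e.g.
//   %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
//   %r = add i32 %p, 4
// -->
//   %r = phi i32 [ 5, %bb0 ], [ 6, %bb1 ]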
1924 bool AllowMultipleUses) {
1925 unsigned NumPHIValues = PN->getNumIncomingValues();
1926 if (NumPHIValues == 0)
1927 return nullptr;
1928
1929 // We normally only transform phis with a single use. However, if a PHI has
1930 // multiple uses and they are all the same operation, we can fold *all* of the
1931 // uses into the PHI.
1932 bool OneUse = PN->hasOneUse();
1933 bool IdenticalUsers = false;
1934 if (!AllowMultipleUses && !OneUse) {
1935 // Walk the use list for the instruction, comparing them to I.
1936 for (User *U : PN->users()) {
1937       auto *UI = cast<Instruction>(U);
1938       if (UI != &I && !I.isIdenticalTo(UI))
1939 return nullptr;
1940 }
1941 // Otherwise, we can replace *all* users with the new PHI we form.
1942 IdenticalUsers = true;
1943 }
1944
1945 // Check that all operands are phi-translatable.
1946 for (Value *Op : I.operands()) {
1947 if (Op == PN)
1948 continue;
1949
1950 // Non-instructions never require phi-translation.
1951 auto *I = dyn_cast<Instruction>(Op);
1952 if (!I)
1953 continue;
1954
1955 // Phi-translate can handle phi nodes in the same block.
1956 if (isa<PHINode>(I))
1957 if (I->getParent() == PN->getParent())
1958 continue;
1959
1960 // Operand dominates the block, no phi-translation necessary.
1961 if (DT.dominates(I, PN->getParent()))
1962 continue;
1963
1964 // Not phi-translatable, bail out.
1965 return nullptr;
1966 }
1967
1968 // Check to see whether the instruction can be folded into each phi operand.
1969 // If there is one operand that does not fold, remember the BB it is in.
1970 SmallVector<Value *> NewPhiValues;
1971 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1972 bool SeenNonSimplifiedInVal = false;
1973 for (unsigned i = 0; i != NumPHIValues; ++i) {
1974 Value *InVal = PN->getIncomingValue(i);
1975 BasicBlock *InBB = PN->getIncomingBlock(i);
1976
1977 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1978 NewPhiValues.push_back(NewVal);
1979 continue;
1980 }
1981
1982 // Handle some cases that can't be fully simplified, but where we know that
1983 // the two instructions will fold into one.
1984 auto WillFold = [&]() {
1985 if (!InVal->hasUseList() || !InVal->hasOneUser())
1986 return false;
1987
1988 // icmp of ucmp/scmp with constant will fold to icmp.
1989 const APInt *Ignored;
1990 if (isa<CmpIntrinsic>(InVal) &&
1991 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1992 return true;
1993
1994 // icmp eq zext(bool), 0 will fold to !bool.
1995 if (isa<ZExtInst>(InVal) &&
1996 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1997 match(&I,
1999 return true;
2000
2001 return false;
2002 };
2003
2004 if (WillFold()) {
2005 OpsToMoveUseToIncomingBB.push_back(i);
2006 NewPhiValues.push_back(nullptr);
2007 continue;
2008 }
2009
2010 if (!OneUse && !IdenticalUsers)
2011 return nullptr;
2012
2013 if (SeenNonSimplifiedInVal)
2014 return nullptr; // More than one non-simplified value.
2015 SeenNonSimplifiedInVal = true;
2016
2017 // If there is exactly one non-simplified value, we can insert a copy of the
2018 // operation in that block. However, if this is a critical edge, we would
2019 // be inserting the computation on some other paths (e.g. inside a loop).
2020 // Only do this if the pred block is unconditionally branching into the phi
2021 // block. Also, make sure that the pred block is not dead code.
2023 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
2024 return nullptr;
2025
2026 NewPhiValues.push_back(nullptr);
2027 OpsToMoveUseToIncomingBB.push_back(i);
2028
2029 // Do not push the operation across a loop backedge. This could result in
2030 // an infinite combine loop, and is generally non-profitable (especially
2031 // if the operation was originally outside the loop).
2032 if (isBackEdge(InBB, PN->getParent()))
2033 return nullptr;
2034 }
2035
2036 // Clone the instruction that uses the phi node and move it into the incoming
2037 // BB because we know that the next iteration of InstCombine will simplify it.
2039 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
2041 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
2042
2043 Instruction *Clone = Clones.lookup(OpBB);
2044 if (!Clone) {
2045 Clone = I.clone();
2046 for (Use &U : Clone->operands()) {
2047 if (U == PN)
2048 U = Op;
2049 else
2050 U = U->DoPHITranslation(PN->getParent(), OpBB);
2051 }
2052 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2053 Clones.insert({OpBB, Clone});
2054 // We may have speculated the instruction.
2056 }
2057
2058 NewPhiValues[OpIndex] = Clone;
2059 }
2060
2061 // Okay, we can do the transformation: create the new PHI node.
2062 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2063 InsertNewInstBefore(NewPN, PN->getIterator());
2064 NewPN->takeName(PN);
2065 NewPN->setDebugLoc(PN->getDebugLoc());
2066
2067 for (unsigned i = 0; i != NumPHIValues; ++i)
2068 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2069
2070 if (IdenticalUsers) {
2071 // Collect and deduplicate users up-front to avoid iterator invalidation.
2073 for (User *U : PN->users()) {
2075 if (User == &I)
2076 continue;
2077 ToReplace.insert(User);
2078 }
2079 for (Instruction *I : ToReplace) {
2080 replaceInstUsesWith(*I, NewPN);
2082 }
2083 OneUse = true;
2084 }
2085
2086 if (OneUse) {
2087 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2088 }
2089 return replaceInstUsesWith(I, NewPN);
2090}
2091
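// Illustrative example (not part of the original source): two parallel add
// recurrences combined through an outer add,
//   %p0 = phi i32 [ 1, %entry ], [ %a0, %loop ]   ; %a0 = add i32 %p0, 3
//   %p1 = phi i32 [ 2, %entry ], [ %a1, %loop ]   ; %a1 = add i32 %p1, 5
//   %r  = add i32 %a0, %a1
// -->
//   %p  = phi i32 [ 3, %entry ], [ %r, %loop ]    ; %r  = add i32 %p, 8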
2093 if (!BO.isAssociative())
2094 return nullptr;
2095
2096 // Find the interleaved binary ops.
2097 auto Opc = BO.getOpcode();
2098 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2099 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2100 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2101 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2102 !BO0->isAssociative() || !BO1->isAssociative() ||
2103 BO0->getParent() != BO1->getParent())
2104 return nullptr;
2105
2106 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2107 "Expected commutative instructions!");
2108
2109 // Find the matching phis, forming the recurrences.
2110 PHINode *PN0, *PN1;
2111 Value *Start0, *Step0, *Start1, *Step1;
2112 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2113 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2114 PN0->getParent() != PN1->getParent())
2115 return nullptr;
2116
2117 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2118 "Expected PHIs with two incoming values!");
2119
2120 // Convert the start and step values to constants.
2121 auto *Init0 = dyn_cast<Constant>(Start0);
2122 auto *Init1 = dyn_cast<Constant>(Start1);
2123 auto *C0 = dyn_cast<Constant>(Step0);
2124 auto *C1 = dyn_cast<Constant>(Step1);
2125 if (!Init0 || !Init1 || !C0 || !C1)
2126 return nullptr;
2127
2128 // Fold the recurrence constants.
2129 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2130 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2131 if (!Init || !C)
2132 return nullptr;
2133
2134 // Create the reduced PHI.
2135 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2136 "reduced.phi");
2137
2138 // Create the new binary op.
2139 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2140 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2141 // Intersect FMF flags for FADD and FMUL.
2142 FastMathFlags Intersect = BO0->getFastMathFlags() &
2143 BO1->getFastMathFlags() & BO.getFastMathFlags();
2144 NewBO->setFastMathFlags(Intersect);
2145 } else {
2146 OverflowTracking Flags;
2147 Flags.AllKnownNonNegative = false;
2148 Flags.AllKnownNonZero = false;
2149 Flags.mergeFlags(*BO0);
2150 Flags.mergeFlags(*BO1);
2151 Flags.mergeFlags(BO);
2152 Flags.applyFlags(*NewBO);
2153 }
2154 NewBO->takeName(&BO);
2155
2156 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2157 auto *V = PN0->getIncomingValue(I);
2158 auto *BB = PN0->getIncomingBlock(I);
2159 if (V == Init0) {
2160 assert(((PN1->getIncomingValue(0) == Init1 &&
2161 PN1->getIncomingBlock(0) == BB) ||
2162 (PN1->getIncomingValue(1) == Init1 &&
2163 PN1->getIncomingBlock(1) == BB)) &&
2164 "Invalid incoming block!");
2165 NewPN->addIncoming(Init, BB);
2166 } else if (V == BO0) {
2167 assert(((PN1->getIncomingValue(0) == BO1 &&
2168 PN1->getIncomingBlock(0) == BB) ||
2169 (PN1->getIncomingValue(1) == BO1 &&
2170 PN1->getIncomingBlock(1) == BB)) &&
2171 "Invalid incoming block!");
2172 NewPN->addIncoming(NewBO, BB);
2173 } else
2174 llvm_unreachable("Unexpected incoming value!");
2175 }
2176
2177 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2178 << "\n with " << *PN1 << "\n " << *BO1
2179 << '\n');
2180
2181 // Insert the new recurrence and remove the old (dead) ones.
2182 InsertNewInstWith(NewPN, PN0->getIterator());
2183 InsertNewInstWith(NewBO, BO0->getIterator());
2184
2191
2192 return replaceInstUsesWith(BO, NewBO);
2193}
2194
2196 // Attempt to fold binary operators whose operands are simple recurrences.
2197 if (auto *NewBO = foldBinopWithRecurrence(BO))
2198 return NewBO;
2199
2200 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2201 // we are guarding against replicating the binop in >1 predecessor.
2202 // This could miss matching a phi with 2 constant incoming values.
2203 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2204 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2205 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2206 Phi0->getNumOperands() != Phi1->getNumOperands())
2207 return nullptr;
2208
2209 // TODO: Remove the restriction for binop being in the same block as the phis.
2210 if (BO.getParent() != Phi0->getParent() ||
2211 BO.getParent() != Phi1->getParent())
2212 return nullptr;
2213
2214   // Fold if at least one of phi0's or phi1's incoming values (from the same
2215   // block) is the identity constant of the binary operator, so the other
2216   // incoming value can be used directly.
2217 // For example:
2218 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2219 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2220 // %add = add i32 %phi0, %phi1
2221 // ==>
2222 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2223   Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2224                                                /*AllowRHSConstant*/ false);
2225 if (C) {
2226 SmallVector<Value *, 4> NewIncomingValues;
2227 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2228 auto &Phi0Use = std::get<0>(T);
2229 auto &Phi1Use = std::get<1>(T);
2230 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2231 return false;
2232 Value *Phi0UseV = Phi0Use.get();
2233 Value *Phi1UseV = Phi1Use.get();
2234 if (Phi0UseV == C)
2235 NewIncomingValues.push_back(Phi1UseV);
2236 else if (Phi1UseV == C)
2237 NewIncomingValues.push_back(Phi0UseV);
2238 else
2239 return false;
2240 return true;
2241 };
2242
2243 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2244 CanFoldIncomingValuePair)) {
2245 PHINode *NewPhi =
2246 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2247 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2248 "The number of collected incoming values should equal the number "
2249 "of the original PHINode operands!");
2250 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2251 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2252 return NewPhi;
2253 }
2254 }
2255
2256 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2257 return nullptr;
2258
2259 // Match a pair of incoming constants for one of the predecessor blocks.
2260 BasicBlock *ConstBB, *OtherBB;
2261 Constant *C0, *C1;
2262 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2263 ConstBB = Phi0->getIncomingBlock(0);
2264 OtherBB = Phi0->getIncomingBlock(1);
2265 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2266 ConstBB = Phi0->getIncomingBlock(1);
2267 OtherBB = Phi0->getIncomingBlock(0);
2268 } else {
2269 return nullptr;
2270 }
2271 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2272 return nullptr;
2273
2274 // The block that we are hoisting to must reach here unconditionally.
2275 // Otherwise, we could be speculatively executing an expensive or
2276 // non-speculative op.
2277 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2278 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2279 !DT.isReachableFromEntry(OtherBB))
2280 return nullptr;
2281
2282 // TODO: This check could be tightened to only apply to binops (div/rem) that
2283 // are not safe to speculatively execute. But that could allow hoisting
2284 // potentially expensive instructions (fdiv for example).
2285 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2287 return nullptr;
2288
2289 // Fold constants for the predecessor block with constant incoming values.
2290 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2291 if (!NewC)
2292 return nullptr;
2293
2294 // Make a new binop in the predecessor block with the non-constant incoming
2295 // values.
2296 Builder.SetInsertPoint(PredBlockBranch);
2297 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2298 Phi0->getIncomingValueForBlock(OtherBB),
2299 Phi1->getIncomingValueForBlock(OtherBB));
2300 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2301 NotFoldedNewBO->copyIRFlags(&BO);
2302
2303 // Replace the binop with a phi of the new values. The old phis are dead.
2304 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2305 NewPhi->addIncoming(NewBO, OtherBB);
2306 NewPhi->addIncoming(NewC, ConstBB);
2307 return NewPhi;
2308}
2309
2311 bool IsOtherParamConst = isa<Constant>(I.getOperand(1));
2312
2313 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2314 if (Instruction *NewSel =
2315 FoldOpIntoSelect(I, Sel, false, !IsOtherParamConst))
2316 return NewSel;
2317 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2318 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2319 return NewPhi;
2320 }
2321 return nullptr;
2322}
2323
2325 // If this GEP has only 0 indices, it is the same pointer as
2326 // Src. If Src is not a trivial GEP too, don't combine
2327 // the indices.
2328 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2329 !Src.hasOneUse())
2330 return false;
2331 return true;
2332}
2333
2334/// Find a constant NewC that has property:
2335/// shuffle(NewC, ShMask) = C
2336/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2337///
2338/// A 1-to-1 mapping is not required. Example:
2339/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2341 VectorType *NewCTy) {
2342 if (isa<ScalableVectorType>(NewCTy)) {
2343 Constant *Splat = C->getSplatValue();
2344 if (!Splat)
2345 return nullptr;
2347 }
2348
2349 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2350 cast<FixedVectorType>(C->getType())->getNumElements())
2351 return nullptr;
2352
2353 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2354 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2355 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2356 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2357 for (unsigned I = 0; I < NumElts; ++I) {
2358 Constant *CElt = C->getAggregateElement(I);
2359 if (ShMask[I] >= 0) {
2360 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2361 Constant *NewCElt = NewVecC[ShMask[I]];
2362 // Bail out if:
2363 // 1. The constant vector contains a constant expression.
2364 // 2. The shuffle needs an element of the constant vector that can't
2365 // be mapped to a new constant vector.
2366 // 3. This is a widening shuffle that copies elements of V1 into the
2367 // extended elements (extending with poison is allowed).
2368 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2369 I >= NewCNumElts)
2370 return nullptr;
2371 NewVecC[ShMask[I]] = CElt;
2372 }
2373 }
2374 return ConstantVector::get(NewVecC);
2375}
2376
2377// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
2379 Constant *Splat, bool SplatLHS,
2380 const DataLayout &DL) {
2381 ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
2383 Constant *RHS = Vector;
2384 if (!SplatLHS)
2385 std::swap(LHS, RHS);
2386 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
2387}
2388
2390 if (!isa<VectorType>(Inst.getType()))
2391 return nullptr;
2392
2393 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2394 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2395 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2396 cast<VectorType>(Inst.getType())->getElementCount());
2397 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2398 cast<VectorType>(Inst.getType())->getElementCount());
2399
2400 auto foldConstantsThroughSubVectorInsertSplat =
2401 [&](Value *MaybeSubVector, Value *MaybeSplat,
2402 bool SplatLHS) -> Instruction * {
2403 Value *Idx;
2404 Constant *Splat, *SubVector, *Dest;
2405 if (!match(MaybeSplat, m_ConstantSplat(m_Constant(Splat))) ||
2406 !match(MaybeSubVector,
2407 m_VectorInsert(m_Constant(Dest), m_Constant(SubVector),
2408 m_Value(Idx))))
2409 return nullptr;
2410 SubVector =
2411 constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
2412 Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
2413 if (!SubVector || !Dest)
2414 return nullptr;
2415 auto *InsertVector =
2416 Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, Idx);
2417 return replaceInstUsesWith(Inst, InsertVector);
2418 };
2419
2420 // If one operand is a constant splat and the other operand is a
2421 // `vector.insert` where both the destination and subvector are constant,
2422 // apply the operation to both the destination and subvector, returning a new
2423 // constant `vector.insert`. This helps constant folding for scalable vectors.
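  // e.g. (illustrative):
  //   add (vector.insert C_dest, C_sub, %idx), splat(C)
  //     --> vector.insert (add C_dest, splat(C)), (add C_sub, splat(C)), %idx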
2424 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2425 /*MaybeSubVector=*/LHS, /*MaybeSplat=*/RHS, /*SplatLHS=*/false))
2426 return Folded;
2427 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2428 /*MaybeSubVector=*/RHS, /*MaybeSplat=*/LHS, /*SplatLHS=*/true))
2429 return Folded;
2430
2431 // If both operands of the binop are vector concatenations, then perform the
2432 // narrow binop on each pair of the source operands followed by concatenation
2433 // of the results.
2434 Value *L0, *L1, *R0, *R1;
2435 ArrayRef<int> Mask;
2436 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2437 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2438 LHS->hasOneUse() && RHS->hasOneUse() &&
2439 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2440 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2441 // This transform does not have the speculative execution constraint as
2442 // below because the shuffle is a concatenation. The new binops are
2443 // operating on exactly the same elements as the existing binop.
2444 // TODO: We could ease the mask requirement to allow different undef lanes,
2445 // but that requires an analysis of the binop-with-undef output value.
2446 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2447 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2448 BO->copyIRFlags(&Inst);
2449 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2450 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2451 BO->copyIRFlags(&Inst);
2452 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2453 }
2454
2455 auto createBinOpReverse = [&](Value *X, Value *Y) {
2456 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2457 if (auto *BO = dyn_cast<BinaryOperator>(V))
2458 BO->copyIRFlags(&Inst);
2459 Module *M = Inst.getModule();
2461 M, Intrinsic::vector_reverse, V->getType());
2462 return CallInst::Create(F, V);
2463 };
2464
2465 // NOTE: Reverse shuffles don't require the speculative execution protection
2466 // below because they don't affect which lanes take part in the computation.
2467
2468 Value *V1, *V2;
2469 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2470 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2471 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2472 (LHS->hasOneUse() || RHS->hasOneUse() ||
2473 (LHS == RHS && LHS->hasNUses(2))))
2474 return createBinOpReverse(V1, V2);
2475
2476 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2477 if (LHS->hasOneUse() && isSplatValue(RHS))
2478 return createBinOpReverse(V1, RHS);
2479 }
2480 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2481 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2482 return createBinOpReverse(LHS, V2);
2483
2484 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2485 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2486 if (auto *BO = dyn_cast<BinaryOperator>(V))
2487 BO->copyIRFlags(&Inst);
2488
2489 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2490 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2491 Module *M = Inst.getModule();
2493 M, Intrinsic::experimental_vp_reverse, V->getType());
2494 return CallInst::Create(F, {V, AllTrueMask, EVL});
2495 };
2496
2497 Value *EVL;
2499 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2500 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2502 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2503 (LHS->hasOneUse() || RHS->hasOneUse() ||
2504 (LHS == RHS && LHS->hasNUses(2))))
2505 return createBinOpVPReverse(V1, V2, EVL);
2506
2507 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2508 if (LHS->hasOneUse() && isSplatValue(RHS))
2509 return createBinOpVPReverse(V1, RHS, EVL);
2510 }
2511 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2512 else if (isSplatValue(LHS) &&
2514 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2515 return createBinOpVPReverse(LHS, V2, EVL);
2516
2517 // It may not be safe to reorder shuffles and things like div, urem, etc.
2518 // because we may trap when executing those ops on unknown vector elements.
2519 // See PR20059.
2521 return nullptr;
2522
2523 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2524 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2525 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2526 BO->copyIRFlags(&Inst);
2527 return new ShuffleVectorInst(XY, M);
2528 };
2529
2530 // If both arguments of the binary operation are shuffles that use the same
2531 // mask and shuffle within a single vector, move the shuffle after the binop.
2532 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2533 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2534 V1->getType() == V2->getType() &&
2535 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2536 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2537 return createBinOpShuffle(V1, V2, Mask);
2538 }
2539
2540 // If both arguments of a commutative binop are select-shuffles that use the
2541 // same mask with commuted operands, the shuffles are unnecessary.
2542 if (Inst.isCommutative() &&
2543 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2544 match(RHS,
2545 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2546 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2547 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2548 // TODO: Allow shuffles that contain undefs in the mask?
2549 // That is legal, but it reduces undef knowledge.
2550 // TODO: Allow arbitrary shuffles by shuffling after binop?
2551 // That might be legal, but we have to deal with poison.
2552 if (LShuf->isSelect() &&
2553 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2554 RShuf->isSelect() &&
2555 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2556 // Example:
2557 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2558 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2559 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2560 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2561 NewBO->copyIRFlags(&Inst);
2562 return NewBO;
2563 }
2564 }
2565
2566 // If one argument is a shuffle within one vector and the other is a constant,
2567 // try moving the shuffle after the binary operation. This canonicalization
2568 // intends to move shuffles closer to other shuffles and binops closer to
2569 // other binops, so they can be folded. It may also enable demanded elements
2570 // transforms.
2571 Constant *C;
2573 m_Mask(Mask))),
2574 m_ImmConstant(C)))) {
2575 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2576 "Shuffle should not change scalar type");
2577
2578 bool ConstOp1 = isa<Constant>(RHS);
2579 if (Constant *NewC =
2581 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2582 // which will cause UB for div/rem. Mask them with a safe constant.
2583 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2584 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2585
2586 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2587 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2588 Value *NewLHS = ConstOp1 ? V1 : NewC;
2589 Value *NewRHS = ConstOp1 ? NewC : V1;
2590 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2591 }
2592 }
2593
2594 // Try to reassociate to sink a splat shuffle after a binary operation.
2595 if (Inst.isAssociative() && Inst.isCommutative()) {
2596 // Canonicalize shuffle operand as LHS.
2597 if (isa<ShuffleVectorInst>(RHS))
2598 std::swap(LHS, RHS);
2599
2600 Value *X;
2601 ArrayRef<int> MaskC;
2602 int SplatIndex;
2603 Value *Y, *OtherOp;
2604 if (!match(LHS,
2605 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2606 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2607 X->getType() != Inst.getType() ||
2608 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2609 return nullptr;
2610
2611 // FIXME: This may not be safe if the analysis allows undef elements. By
2612 // moving 'Y' before the splat shuffle, we are implicitly assuming
2613 // that it is not undef/poison at the splat index.
2614 if (isSplatValue(OtherOp, SplatIndex)) {
2615 std::swap(Y, OtherOp);
2616 } else if (!isSplatValue(Y, SplatIndex)) {
2617 return nullptr;
2618 }
2619
2620 // X and Y are splatted values, so perform the binary operation on those
2621 // values followed by a splat followed by the 2nd binary operation:
2622 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2623 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2624 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2625 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2626 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2627
2628 // Intersect FMF on both new binops. Other (poison-generating) flags are
2629 // dropped to be safe.
2630 if (isa<FPMathOperator>(R)) {
2631 R->copyFastMathFlags(&Inst);
2632 R->andIRFlags(RHS);
2633 }
2634 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2635 NewInstBO->copyIRFlags(R);
2636 return R;
2637 }
2638
2639 return nullptr;
2640}
2641
2642/// Try to narrow the width of a binop if at least 1 operand is an extend of
2643 /// a value. This requires a potentially expensive known bits check to make
2644/// sure the narrow op does not overflow.
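/// For example (illustrative, assuming the narrow add is proven not to
/// overflow):
///   add (zext i8 %x to i32), (zext i8 %y to i32)
///     --> zext (add nuw i8 %x, %y) to i32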
2645Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2646 // We need at least one extended operand.
2647 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2648
2649 // If this is a sub, we swap the operands since we always want an extension
2650 // on the RHS. The LHS can be an extension or a constant.
2651 if (BO.getOpcode() == Instruction::Sub)
2652 std::swap(Op0, Op1);
2653
2654 Value *X;
2655 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2656 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2657 return nullptr;
2658
2659 // If both operands are the same extension from the same source type and we
2660 // can eliminate at least one (hasOneUse), this might work.
2661 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2662 Value *Y;
2663 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2664 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2665 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2666 // If that did not match, see if we have a suitable constant operand.
2667 // Truncating and extending must produce the same constant.
2668 Constant *WideC;
2669 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2670 return nullptr;
2671 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2672 if (!NarrowC)
2673 return nullptr;
2674 Y = NarrowC;
2675 }
2676
2677 // Swap back now that we found our operands.
2678 if (BO.getOpcode() == Instruction::Sub)
2679 std::swap(X, Y);
2680
2681 // Both operands have narrow versions. Last step: the math must not overflow
2682 // in the narrow width.
2683 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2684 return nullptr;
2685
2686 // bo (ext X), (ext Y) --> ext (bo X, Y)
2687 // bo (ext X), C --> ext (bo X, C')
2688 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2689 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2690 if (IsSext)
2691 NewBinOp->setHasNoSignedWrap();
2692 else
2693 NewBinOp->setHasNoUnsignedWrap();
2694 }
2695 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2696}
2697
2698/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2699/// transform.
2704
2705/// Thread a GEP operation with constant indices through the constant true/false
2706/// arms of a select.
2708 InstCombiner::BuilderTy &Builder) {
2709 if (!GEP.hasAllConstantIndices())
2710 return nullptr;
2711
2712 Instruction *Sel;
2713 Value *Cond;
2714 Constant *TrueC, *FalseC;
2715 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2716 !match(Sel,
2717 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2718 return nullptr;
2719
2720 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2721 // Propagate 'inbounds' and metadata from existing instructions.
2722 // Note: using IRBuilder to create the constants for efficiency.
2723 SmallVector<Value *, 4> IndexC(GEP.indices());
2724 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2725 Type *Ty = GEP.getSourceElementType();
2726 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2727 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2728 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2729}
2730
2731// Canonicalization:
2732// gep T, (gep i8, base, C1), (Index + C2) into
2733// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
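// e.g. (illustrative, with sizeof(i32) == 4):
//   gep i32, (gep i8, %base, 8), (add %idx, 3)
//     --> gep i32, (gep i8, %base, 20), %idx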
2735 GEPOperator *Src,
2736 InstCombinerImpl &IC) {
2737 if (GEP.getNumIndices() != 1)
2738 return nullptr;
2739 auto &DL = IC.getDataLayout();
2740 Value *Base;
2741 const APInt *C1;
2742 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2743 return nullptr;
2744 Value *VarIndex;
2745 const APInt *C2;
2746 Type *PtrTy = Src->getType()->getScalarType();
2747 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2748 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2749 return nullptr;
2750 if (C1->getBitWidth() != IndexSizeInBits ||
2751 C2->getBitWidth() != IndexSizeInBits)
2752 return nullptr;
2753 Type *BaseType = GEP.getSourceElementType();
2755 return nullptr;
2756 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2757 APInt NewOffset = TypeSize * *C2 + *C1;
2758 if (NewOffset.isZero() ||
2759 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2761 if (GEP.hasNoUnsignedWrap() &&
2762 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2763 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2765 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2766 Flags |= GEPNoWrapFlags::inBounds();
2767 }
2768
2769 Value *GEPConst =
2770 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2771 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2772 }
2773
2774 return nullptr;
2775}
2776
2777/// Combine constant offsets separated by variable offsets.
2778/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
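/// e.g. (illustrative):
///   ptradd (ptradd (ptradd %p, 8), %x), 4 --> ptradd (ptradd %p, %x), 12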
2780 InstCombinerImpl &IC) {
2781 if (!GEP.hasAllConstantIndices())
2782 return nullptr;
2783
2786 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2787 while (true) {
2788 if (!InnerGEP)
2789 return nullptr;
2790
2791 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2792 if (InnerGEP->hasAllConstantIndices())
2793 break;
2794
2795 if (!InnerGEP->hasOneUse())
2796 return nullptr;
2797
2798 Skipped.push_back(InnerGEP);
2799 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2800 }
2801
2802 // The two constant offset GEPs are directly adjacent: Let normal offset
2803 // merging handle it.
2804 if (Skipped.empty())
2805 return nullptr;
2806
2807 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2808 // if profitable.
2809 if (!InnerGEP->hasOneUse())
2810 return nullptr;
2811
2812 // Don't bother with vector splats.
2813 Type *Ty = GEP.getType();
2814 if (InnerGEP->getType() != Ty)
2815 return nullptr;
2816
2817 const DataLayout &DL = IC.getDataLayout();
2818 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2819 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2820 !InnerGEP->accumulateConstantOffset(DL, Offset))
2821 return nullptr;
2822
2823 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2824 for (GetElementPtrInst *SkippedGEP : Skipped)
2825 SkippedGEP->setNoWrapFlags(NW);
2826
2827 return IC.replaceInstUsesWith(
2828 GEP,
2829 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2830 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2831}
2832
2834 GEPOperator *Src) {
2835 // Combine Indices - If the source pointer to this getelementptr instruction
2836 // is a getelementptr instruction with matching element type, combine the
2837 // indices of the two getelementptr instructions into a single instruction.
2838 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2839 return nullptr;
2840
2841 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2842 return I;
2843
2844 if (auto *I = combineConstantOffsets(GEP, *this))
2845 return I;
2846
2847 if (Src->getResultElementType() != GEP.getSourceElementType())
2848 return nullptr;
2849
2850 // Fold chained GEP with constant base into single GEP:
2851 // gep i8, (gep i8, %base, C1), (select Cond, C2, C3)
2852 // -> gep i8, %base, (select Cond, C1+C2, C1+C3)
2853 if (Src->hasOneUse() && GEP.getNumIndices() == 1 &&
2854 Src->getNumIndices() == 1) {
2855 Value *SrcIdx = *Src->idx_begin();
2856 Value *GEPIdx = *GEP.idx_begin();
2857 const APInt *ConstOffset, *TrueVal, *FalseVal;
2858 Value *Cond;
2859
2860 if ((match(SrcIdx, m_APInt(ConstOffset)) &&
2861 match(GEPIdx,
2862 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal)))) ||
2863 (match(GEPIdx, m_APInt(ConstOffset)) &&
2864 match(SrcIdx,
2865 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal))))) {
2866 auto *Select = isa<SelectInst>(GEPIdx) ? cast<SelectInst>(GEPIdx)
2867 : cast<SelectInst>(SrcIdx);
2868
2869 // Make sure the select has only one use.
2870 if (!Select->hasOneUse())
2871 return nullptr;
2872
2873 if (TrueVal->getBitWidth() != ConstOffset->getBitWidth() ||
2874 FalseVal->getBitWidth() != ConstOffset->getBitWidth())
2875 return nullptr;
2876
2877 APInt NewTrueVal = *ConstOffset + *TrueVal;
2878 APInt NewFalseVal = *ConstOffset + *FalseVal;
2879 Constant *NewTrue = ConstantInt::get(Select->getType(), NewTrueVal);
2880 Constant *NewFalse = ConstantInt::get(Select->getType(), NewFalseVal);
2881 Value *NewSelect = Builder.CreateSelect(
2882 Cond, NewTrue, NewFalse, /*Name=*/"",
2883 /*MDFrom=*/(ProfcheckDisableMetadataFixes ? nullptr : Select));
2884 GEPNoWrapFlags Flags =
2886 return replaceInstUsesWith(GEP,
2887 Builder.CreateGEP(GEP.getResultElementType(),
2888 Src->getPointerOperand(),
2889 NewSelect, "", Flags));
2890 }
2891 }
2892
2893 // Find out whether the last index in the source GEP is a sequential idx.
2894 bool EndsWithSequential = false;
2895 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2896 I != E; ++I)
2897 EndsWithSequential = I.isSequential();
2898 if (!EndsWithSequential)
2899 return nullptr;
2900
2901 // Replace: gep (gep %P, long B), long A, ...
2902 // With: T = long A+B; gep %P, T, ...
2903 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2904 Value *GO1 = GEP.getOperand(1);
2905
2906 // If they aren't the same type, then the input hasn't been processed
2907 // by the loop above yet (which canonicalizes sequential index types to
2908 // intptr_t). Just avoid transforming this until the input has been
2909 // normalized.
2910 if (SO1->getType() != GO1->getType())
2911 return nullptr;
2912
2913 Value *Sum =
2914 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2915 // Only do the combine when we are sure the cost after the
2916 // merge is never more than that before the merge.
2917 if (Sum == nullptr)
2918 return nullptr;
2919
2921 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2922 Indices.push_back(Sum);
2923 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2924
2925 // Don't create GEPs with more than one non-zero index.
2926 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2927 auto *C = dyn_cast<Constant>(Idx);
2928 return !C || !C->isNullValue();
2929 });
2930 if (NumNonZeroIndices > 1)
2931 return nullptr;
2932
2933 return replaceInstUsesWith(
2934 GEP, Builder.CreateGEP(
2935 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2937}
2938
2941 bool &DoesConsume, unsigned Depth) {
2942 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2943 // ~(~(X)) -> X.
2944 Value *A, *B;
2945 if (match(V, m_Not(m_Value(A)))) {
2946 DoesConsume = true;
2947 return A;
2948 }
2949
2950 Constant *C;
2951 // Constants can be considered to be not'ed values.
2952 if (match(V, m_ImmConstant(C)))
2953 return ConstantExpr::getNot(C);
2954
2956 return nullptr;
2957
2958 // The rest of the cases require that we invert all uses so don't bother
2959 // doing the analysis if we know we can't use the result.
2960 if (!WillInvertAllUses)
2961 return nullptr;
2962
2963 // Compares can be inverted if all of their uses are being modified to use
2964 // the ~V.
2965 if (auto *I = dyn_cast<CmpInst>(V)) {
2966 if (Builder != nullptr)
2967 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2968 I->getOperand(1));
2969 return NonNull;
2970 }
2971
2972 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2973 // `(-1 - B) - A` if we are willing to invert all of the uses.
2974 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2975 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2976 DoesConsume, Depth))
2977 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2978 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2979 DoesConsume, Depth))
2980 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2981 return nullptr;
2982 }
2983
2984 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2985 // into `A ^ B` if we are willing to invert all of the uses.
2986 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2987 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2988 DoesConsume, Depth))
2989 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2990 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2991 DoesConsume, Depth))
2992 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2993 return nullptr;
2994 }
2995
2996 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2997 // `A + (-1 - B)` if we are willing to invert all of the uses.
2998 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2999 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3000 DoesConsume, Depth))
3001 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
3002 return nullptr;
3003 }
3004
3005 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
3006 // into `A s>> B` if we are willing to invert all of the uses.
3007 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
3008 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3009 DoesConsume, Depth))
3010 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
3011 return nullptr;
3012 }
3013
3014 Value *Cond;
3015 // LogicOps are special in that we canonicalize them at the cost of an
3016 // instruction.
3017 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
3019 // Selects/min/max with invertible operands are freely invertible
3020 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
3021 bool LocalDoesConsume = DoesConsume;
3022 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
3023 LocalDoesConsume, Depth))
3024 return nullptr;
3025 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3026 LocalDoesConsume, Depth)) {
3027 DoesConsume = LocalDoesConsume;
3028 if (Builder != nullptr) {
3029 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3030 DoesConsume, Depth);
3031 assert(NotB != nullptr &&
3032                  "Unable to build inverted value for known freely invertible op");
3033 if (auto *II = dyn_cast<IntrinsicInst>(V))
3034 return Builder->CreateBinaryIntrinsic(
3035 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
3036 return Builder->CreateSelect(Cond, NotA, NotB);
3037 }
3038 return NonNull;
3039 }
3040 }
3041
3042 if (PHINode *PN = dyn_cast<PHINode>(V)) {
3043 bool LocalDoesConsume = DoesConsume;
3045 for (Use &U : PN->operands()) {
3046 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3047 Value *NewIncomingVal = getFreelyInvertedImpl(
3048 U.get(), /*WillInvertAllUses=*/false,
3049 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
3050 if (NewIncomingVal == nullptr)
3051 return nullptr;
3052 // Make sure that we can safely erase the original PHI node.
3053 if (NewIncomingVal == V)
3054 return nullptr;
3055 if (Builder != nullptr)
3056 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
3057 }
3058
3059 DoesConsume = LocalDoesConsume;
3060 if (Builder != nullptr) {
3062 Builder->SetInsertPoint(PN);
3063 PHINode *NewPN =
3064 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3065 for (auto [Val, Pred] : IncomingValues)
3066 NewPN->addIncoming(Val, Pred);
3067 return NewPN;
3068 }
3069 return NonNull;
3070 }
3071
3072 if (match(V, m_SExtLike(m_Value(A)))) {
3073 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3074 DoesConsume, Depth))
3075 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3076 return nullptr;
3077 }
3078
3079 if (match(V, m_Trunc(m_Value(A)))) {
3080 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3081 DoesConsume, Depth))
3082 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3083 return nullptr;
3084 }
3085
3086 // De Morgan's Laws:
3087 // (~(A | B)) -> (~A & ~B)
3088 // (~(A & B)) -> (~A | ~B)
3089 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3090 bool IsLogical, Value *A,
3091 Value *B) -> Value * {
3092 bool LocalDoesConsume = DoesConsume;
3093 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3094 LocalDoesConsume, Depth))
3095 return nullptr;
3096 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3097 LocalDoesConsume, Depth)) {
3098 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3099 LocalDoesConsume, Depth);
3100 DoesConsume = LocalDoesConsume;
3101 if (IsLogical)
3102 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3103 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3104 }
3105
3106 return nullptr;
3107 };
3108
3109 if (match(V, m_Or(m_Value(A), m_Value(B))))
3110 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3111 B);
3112
3113 if (match(V, m_And(m_Value(A), m_Value(B))))
3114 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3115 B);
3116
3117 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3118 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3119 B);
3120
3121 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3122 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3123 B);
3124
3125 return nullptr;
3126}
3127
3128/// Return true if we should canonicalize the gep to an i8 ptradd.
3130 Value *PtrOp = GEP.getOperand(0);
3131 Type *GEPEltType = GEP.getSourceElementType();
3132 if (GEPEltType->isIntegerTy(8))
3133 return false;
3134
3135 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3136 // intrinsic. This has better support in BasicAA.
3137 if (GEPEltType->isScalableTy())
3138 return true;
3139
3140 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3141 // together.
3142 if (GEP.getNumIndices() == 1 &&
3143 match(GEP.getOperand(1),
3145 m_Shl(m_Value(), m_ConstantInt())))))
3146 return true;
3147
3148 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3149 // possibly be merged together.
3150 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3151 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3152 any_of(GEP.indices(), [](Value *V) {
3153 const APInt *C;
3154 return match(V, m_APInt(C)) && !C->isZero();
3155 });
3156}
3157
3159 IRBuilderBase &Builder) {
3160 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3161 if (!Op1)
3162 return nullptr;
3163
3164 // Don't fold a GEP into itself through a PHI node. This can only happen
3165 // through the back-edge of a loop. Folding a GEP into itself means that
3166 // the value of the previous iteration needs to be stored in the meantime,
3167 // thus requiring an additional register variable to be live, but not
3168 // actually achieving anything (the GEP still needs to be executed once per
3169 // loop iteration).
3170 if (Op1 == &GEP)
3171 return nullptr;
3172 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3173
3174 int DI = -1;
3175
3176 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3177 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3178 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3179 Op1->getSourceElementType() != Op2->getSourceElementType())
3180 return nullptr;
3181
3182 // As for Op1 above, don't try to fold a GEP into itself.
3183 if (Op2 == &GEP)
3184 return nullptr;
3185
3186 // Keep track of the type as we walk the GEP.
3187 Type *CurTy = nullptr;
3188
3189 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3190 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3191 return nullptr;
3192
3193 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3194 if (DI == -1) {
3195           // We have not seen any differences in the GEPs feeding the
3196           // PHI yet, so we record this one if it is allowed to be a
3197           // variable.
3198
3199 // The first two arguments can vary for any GEP, the rest have to be
3200 // static for struct slots
3201 if (J > 1) {
3202 assert(CurTy && "No current type?");
3203 if (CurTy->isStructTy())
3204 return nullptr;
3205 }
3206
3207 DI = J;
3208 } else {
3209 // The GEP is different by more than one input. While this could be
3210 // extended to support GEPs that vary by more than one variable it
3211 // doesn't make sense since it greatly increases the complexity and
3212 // would result in an R+R+R addressing mode which no backend
3213 // directly supports and would need to be broken into several
3214 // simpler instructions anyway.
3215 return nullptr;
3216 }
3217 }
3218
3219 // Sink down a layer of the type for the next iteration.
3220 if (J > 0) {
3221 if (J == 1) {
3222 CurTy = Op1->getSourceElementType();
3223 } else {
3224 CurTy =
3225 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3226 }
3227 }
3228 }
3229
3230 NW &= Op2->getNoWrapFlags();
3231 }
3232
3233 // If not all GEPs are identical we'll have to create a new PHI node.
3234 // Check that the old PHI node has only one use so that it will get
3235 // removed.
3236 if (DI != -1 && !PN->hasOneUse())
3237 return nullptr;
3238
3239 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3240 NewGEP->setNoWrapFlags(NW);
3241
3242 if (DI == -1) {
3243 // All the GEPs feeding the PHI are identical. Clone one down into our
3244 // BB so that it can be merged with the current GEP.
3245 } else {
3246 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3247 // into the current block so it can be merged, and create a new PHI to
3248 // set that index.
3249 PHINode *NewPN;
3250 {
3251 IRBuilderBase::InsertPointGuard Guard(Builder);
3252 Builder.SetInsertPoint(PN);
3253 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3254 PN->getNumOperands());
3255 }
3256
3257 for (auto &I : PN->operands())
3258 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3259 PN->getIncomingBlock(I));
3260
3261 NewGEP->setOperand(DI, NewPN);
3262 }
3263
3264 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3265 return NewGEP;
3266}
3267
3268 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3269 Value *PtrOp = GEP.getOperand(0);
3270 SmallVector<Value *, 8> Indices(GEP.indices());
3271 Type *GEPType = GEP.getType();
3272 Type *GEPEltType = GEP.getSourceElementType();
3273 if (Value *V =
3274 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3275 SQ.getWithInstruction(&GEP)))
3276 return replaceInstUsesWith(GEP, V);
3277
3278 // For vector geps, use the generic demanded vector support.
3279 // Skip if GEP return type is scalable. The number of elements is unknown at
3280 // compile-time.
3281 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3282 auto VWidth = GEPFVTy->getNumElements();
3283 APInt PoisonElts(VWidth, 0);
3284 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3285 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3286 PoisonElts)) {
3287 if (V != &GEP)
3288 return replaceInstUsesWith(GEP, V);
3289 return &GEP;
3290 }
3291 }
3292
3293 // Eliminate unneeded casts for indices, and replace indices which displace
3294 // by multiples of a zero size type with zero.
3295 bool MadeChange = false;
3296
3297 // Index width may not be the same width as pointer width.
3298 // Data layout chooses the right type based on supported integer types.
3299 Type *NewScalarIndexTy =
3300 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3301
3302 gep_type_iterator GTI = gep_type_begin(GEP);
3303 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3304 ++I, ++GTI) {
3305 // Skip indices into struct types.
3306 if (GTI.isStruct())
3307 continue;
3308
3309 Type *IndexTy = (*I)->getType();
3310 Type *NewIndexType =
3311 IndexTy->isVectorTy()
3312 ? VectorType::get(NewScalarIndexTy,
3313 cast<VectorType>(IndexTy)->getElementCount())
3314 : NewScalarIndexTy;
3315
3316 // If the element type has zero size then any index over it is equivalent
3317 // to an index of zero, so replace it with zero if it is not zero already.
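// Illustrative sketch (hypothetical IR): in
//   %q = getelementptr [0 x i32], ptr %p, i64 %i, i64 %j
// the %i index steps over a zero-sized type, so it can be replaced by 0:
//   %q = getelementptr [0 x i32], ptr %p, i64 0, i64 %j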
3318 Type *EltTy = GTI.getIndexedType();
3319 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3320 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3321 *I = Constant::getNullValue(NewIndexType);
3322 MadeChange = true;
3323 }
3324
3325 if (IndexTy != NewIndexType) {
3326 // If we are using a wider index than needed for this platform, shrink
3327 // it to what we need. If narrower, sign-extend it to what we need.
3328 // This explicit cast can make subsequent optimizations more obvious.
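// Illustrative sketch (hypothetical IR), assuming a 64-bit index type:
//   %q = getelementptr i8, ptr %p, i32 %i
// becomes
//   %e = sext i32 %i to i64                ; or a non-negative zext when the
//   %q = getelementptr i8, ptr %p, i64 %e  ; gep's no-wrap flags allow it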
3329 if (IndexTy->getScalarSizeInBits() <
3330 NewIndexType->getScalarSizeInBits()) {
3331 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3332 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3333 else
3334 *I = Builder.CreateSExt(*I, NewIndexType);
3335 } else {
3336 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3337 GEP.hasNoUnsignedSignedWrap());
3338 }
3339 MadeChange = true;
3340 }
3341 }
3342 if (MadeChange)
3343 return &GEP;
3344
3345 // Canonicalize constant GEPs to i8 type.
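// Illustrative sketch (hypothetical IR):
//   %q = getelementptr i32, ptr %p, i64 3
// becomes the equivalent byte-based form
//   %q = getelementptr i8, ptr %p, i64 12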
3346 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3347 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3348 if (GEP.accumulateConstantOffset(DL, Offset))
3349 return replaceInstUsesWith(
3350 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3351 GEP.getNoWrapFlags()));
3352 }
3353
3354 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
3355 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3356 Value *NewGEP =
3357 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3358 return replaceInstUsesWith(GEP, NewGEP);
3359 }
3360
3361 // Strip trailing zero indices.
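// Illustrative sketch (hypothetical IR):
//   %q = getelementptr [4 x i32], ptr %p, i64 %i, i64 0
// addresses the same byte as
//   %q = getelementptr [4 x i32], ptr %p, i64 %i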
3362 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3363 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3364 return replaceInstUsesWith(
3365 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3366 drop_end(Indices), "", GEP.getNoWrapFlags()));
3367 }
3368
3369 // Strip leading zero indices.
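// Illustrative sketch (hypothetical IR):
//   %q = getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// becomes
//   %q = getelementptr i32, ptr %p, i64 %i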
3370 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3371 if (FirstIdx && FirstIdx->isNullValue() &&
3372 !FirstIdx->getType()->isVectorTy()) {
3373 gep_type_iterator GTI = gep_type_begin(GEP);
3374 ++GTI;
3375 if (!GTI.isStruct())
3376 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3377 GEP.getPointerOperand(),
3378 drop_begin(Indices), "",
3379 GEP.getNoWrapFlags()));
3380 }
3381
3382 // Scalarize vector operands; prefer splat-of-gep as canonical form.
3383 // Note that this loses information about undef lanes; we run it after
3384 // demanded bits to partially mitigate that loss.
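// Illustrative sketch (hypothetical IR): if every vector operand is a splat,
//   %q = getelementptr i32, <2 x ptr> %p.splat, <2 x i64> %i.splat
// is rewritten as a scalar gep whose result is splatted back to a vector:
//   %s = getelementptr i32, ptr %p, i64 %i
//   %q = <2 x ptr> splat of %s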
3385 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3386 return Op->getType()->isVectorTy() && getSplatValue(Op);
3387 })) {
3388 SmallVector<Value *> NewOps;
3389 for (auto &Op : GEP.operands()) {
3390 if (Op->getType()->isVectorTy())
3391 if (Value *Scalar = getSplatValue(Op)) {
3392 NewOps.push_back(Scalar);
3393 continue;
3394 }
3395 NewOps.push_back(Op);
3396 }
3397
3398 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3399 ArrayRef(NewOps).drop_front(), GEP.getName(),
3400 GEP.getNoWrapFlags());
3401 if (!Res->getType()->isVectorTy()) {
3402 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3403 Res = Builder.CreateVectorSplat(EC, Res);
3404 }
3405 return replaceInstUsesWith(GEP, Res);
3406 }
3407
3408 bool SeenNonZeroIndex = false;
3409 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3410 auto *C = dyn_cast<Constant>(Idx);
3411 if (C && C->isNullValue())
3412 continue;
3413
3414 if (!SeenNonZeroIndex) {
3415 SeenNonZeroIndex = true;
3416 continue;
3417 }
3418
3419 // GEP has multiple non-zero indices: Split it.
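// Illustrative sketch (hypothetical IR):
//   %q = getelementptr [4 x i32], ptr %p, i64 %i, i64 2
// is split into
//   %f = getelementptr [4 x i32], ptr %p, i64 %i
//   %q = getelementptr [4 x i32], ptr %f, i64 0, i64 2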
3420 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3421 Value *FrontGEP =
3422 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3423 GEP.getName() + ".split", GEP.getNoWrapFlags());
3424
3425 SmallVector<Value *> BackIndices;
3426 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3427 append_range(BackIndices, drop_begin(Indices, IdxNum));
3428 return GetElementPtrInst::Create(
3429 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3430 BackIndices, GEP.getNoWrapFlags());
3431 }
3432
3433 // Check to see if the inputs to the PHI node are getelementptr instructions.
3434 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3435 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3436 return replaceOperand(GEP, 0, NewPtrOp);
3437 }
3438
3439 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3440 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3441 return I;
3442
3443 if (GEP.getNumIndices() == 1) {
3444 unsigned AS = GEP.getPointerAddressSpace();
3445 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3446 DL.getIndexSizeInBits(AS)) {
3447 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3448
3449 if (TyAllocSize == 1) {
3450 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3451 // but only if the result pointer is only used as if it were an integer.
3452 // (The case where the underlying object is the same is handled by
3453 // InstSimplify.)
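// Illustrative sketch (hypothetical IR):
//   %d = sub i64 %y.addr, %x.addr        ; ptrtoint of %y and %x
//   %q = getelementptr i8, ptr %x, i64 %d
//   %c = icmp eq ptr %q, %z
// the icmp may use %y directly, since it only depends on the address.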
3454 Value *X = GEP.getPointerOperand();
3455 Value *Y;
3456 if (match(GEP.getOperand(1), m_Sub(m_PtrToIntOrAddr(m_Value(Y)),
3457 m_PtrToIntOrAddr(m_Specific(X)))) &&
3458 GEPType == Y->getType()) {
3459 bool HasNonAddressBits =
3460 DL.getAddressSizeInBits(AS) != DL.getPointerSizeInBits(AS);
3461 bool Changed = false;
3462 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3463 bool ShouldReplace =
3464 isa<PtrToAddrInst, ICmpInst>(U.getUser()) ||
3465 (!HasNonAddressBits && isa<PtrToIntInst>(U.getUser()));
3466 Changed |= ShouldReplace;
3467 return ShouldReplace;
3468 });
3469 return Changed ? &GEP : nullptr;
3470 }
3471 } else if (auto *ExactIns =
3472 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3473 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
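// Illustrative sketch (hypothetical IR):
//   %i = sdiv exact i64 %v, 4
//   %q = getelementptr i32, ptr %x, i64 %i
// becomes
//   %q = getelementptr i8, ptr %x, i64 %v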
3474 Value *V;
3475 if (ExactIns->isExact()) {
3476 if ((has_single_bit(TyAllocSize) &&
3477 match(GEP.getOperand(1),
3478 m_Shr(m_Value(V),
3479 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3480 match(GEP.getOperand(1),
3481 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3482 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3483 GEP.getPointerOperand(), V,
3484 GEP.getNoWrapFlags());
3485 }
3486 }
3487 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3488 // Try to canonicalize a non-i8 element type to i8 when the index is an
3489 // exact instruction (div/shr) with a constant RHS: in that case we can
3490 // fold the non-i8 element scale into the div/shr (similar to the mul
3491 // case, just inverted).
3492 const APInt *C;
3493 std::optional<APInt> NewC;
3494 if (has_single_bit(TyAllocSize) &&
3495 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3496 C->uge(countr_zero(TyAllocSize)))
3497 NewC = *C - countr_zero(TyAllocSize);
3498 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3499 APInt Quot;
3500 uint64_t Rem;
3501 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3502 if (Rem == 0)
3503 NewC = Quot;
3504 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3505 APInt Quot;
3506 int64_t Rem;
3507 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3508 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3509 if (!Quot.isAllOnes() && Rem == 0)
3510 NewC = Quot;
3511 }
3512
3513 if (NewC.has_value()) {
3514 Value *NewOp = Builder.CreateBinOp(
3515 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3516 ConstantInt::get(V->getType(), *NewC));
3517 cast<BinaryOperator>(NewOp)->setIsExact();
3518 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3519 GEP.getPointerOperand(), NewOp,
3520 GEP.getNoWrapFlags());
3521 }
3522 }
3523 }
3524 }
3525 }
3526 // We do not handle pointer-vector geps here.
3527 if (GEPType->isVectorTy())
3528 return nullptr;
3529
3530 if (!GEP.isInBounds()) {
3531 unsigned IdxWidth =
3532 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3533 APInt BasePtrOffset(IdxWidth, 0);
3534 Value *UnderlyingPtrOp =
3535 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3536 bool CanBeNull, CanBeFreed;
3537 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3538 DL, CanBeNull, CanBeFreed);
3539 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3540 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3541 BasePtrOffset.isNonNegative()) {
3542 APInt AllocSize(IdxWidth, DerefBytes);
3543 if (BasePtrOffset.ule(AllocSize)) {
3544 return GetElementPtrInst::CreateInBounds(
3545 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3546 }
3547 }
3548 }
3549 }
3550
3551 // nusw + nneg -> nuw
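// Illustrative sketch (hypothetical IR), with %i known non-negative:
//   %q = getelementptr nusw i8, ptr %p, i64 %i
// can additionally be marked nuw:
//   %q = getelementptr nusw nuw i8, ptr %p, i64 %i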
3552 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3553 all_of(GEP.indices(), [&](Value *Idx) {
3554 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3555 })) {
3556 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3557 return &GEP;
3558 }
3559
3560 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3561 // to do this after having tried to derive "nuw" above.
3562 if (GEP.getNumIndices() == 1) {
3563 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3564 // geps if transforming into (gep (gep p, x), y).
3565 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3566 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3567 // that x + y does not have unsigned wrap.
3568 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3569 return GEP.getNoWrapFlags();
3570 return GEPNoWrapFlags::none();
3571 };
3572
3573 // Try to replace ADD + GEP with GEP + GEP.
3574 Value *Idx1, *Idx2;
3575 if (match(GEP.getOperand(1),
3576 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3577 // %idx = add i64 %idx1, %idx2
3578 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3579 // as:
3580 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3581 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3582 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3583 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3584 auto *NewPtr =
3585 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3586 Idx1, "", NWFlags);
3587 return replaceInstUsesWith(GEP,
3588 Builder.CreateGEP(GEP.getSourceElementType(),
3589 NewPtr, Idx2, "", NWFlags));
3590 }
3591 ConstantInt *C;
3592 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3593 m_Value(Idx1), m_ConstantInt(C))))))) {
3594 // %add = add nsw i32 %idx1, idx2
3595 // %sidx = sext i32 %add to i64
3596 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3597 // as:
3598 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3599 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3600 bool NUW = match(GEP.getOperand(1),
3602 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3603 auto *NewPtr = Builder.CreateGEP(
3604 GEP.getSourceElementType(), GEP.getPointerOperand(),
3605 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3606 return replaceInstUsesWith(
3607 GEP,
3608 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3609 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3610 "", NWFlags));
3611 }
3612 }
3613
3615 return R;
3616
3617 return nullptr;
3618}
3619
3620 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3621 Instruction *AI) {
3622 if (isa<ConstantPointerNull>(V))
3623 return true;
3624 if (auto *LI = dyn_cast<LoadInst>(V))
3625 return isa<GlobalVariable>(LI->getPointerOperand());
3626 // Two distinct allocations will never be equal.
3627 return isAllocLikeFn(V, &TLI) && V != AI;
3628}
3629
3630/// Given a call CB which uses an address UsedV, return true if we can prove the
3631/// call's only possible effect is storing to UsedV.
3632static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3633 const TargetLibraryInfo &TLI) {
3634 if (!CB.use_empty())
3635 // TODO: add recursion if returned attribute is present
3636 return false;
3637
3638 if (CB.isTerminator())
3639 // TODO: remove implementation restriction
3640 return false;
3641
3642 if (!CB.willReturn() || !CB.doesNotThrow())
3643 return false;
3644
3645 // If the only possible side effect of the call is writing to the alloca,
3646 // and the result isn't used, we can safely remove any reads implied by the
3647 // call including those which might read the alloca itself.
3648 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3649 return Dest && Dest->Ptr == UsedV;
3650}
3651
3652 static std::optional<ModRefInfo>
3653 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3654 const TargetLibraryInfo &TLI, bool KnowInit) {
3655 SmallVector<Instruction *, 4> Worklist;
3656 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3657 Worklist.push_back(AI);
3658 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Mod;
3659
3660 do {
3661 Instruction *PI = Worklist.pop_back_val();
3662 for (User *U : PI->users()) {
3664 switch (I->getOpcode()) {
3665 default:
3666 // Give up the moment we see something we can't handle.
3667 return std::nullopt;
3668
3669 case Instruction::AddrSpaceCast:
3670 case Instruction::BitCast:
3671 case Instruction::GetElementPtr:
3672 Users.emplace_back(I);
3673 Worklist.push_back(I);
3674 continue;
3675
3676 case Instruction::ICmp: {
3677 ICmpInst *ICI = cast<ICmpInst>(I);
3678 // We can fold eq/ne comparisons with null to false/true, respectively.
3679 // We also fold comparisons in some conditions provided the alloc has
3680 // not escaped (see isNeverEqualToUnescapedAlloc).
3681 if (!ICI->isEquality())
3682 return std::nullopt;
3683 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3684 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3685 return std::nullopt;
3686
3687 // Do not fold compares to aligned_alloc calls, as they may have to
3688 // return null in case the required alignment cannot be satisfied,
3689 // unless we can prove that both alignment and size are valid.
3690 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3691 // Check if alignment and size of a call to aligned_alloc is valid,
3692 // that is alignment is a power-of-2 and the size is a multiple of the
3693 // alignment.
3694 const APInt *Alignment;
3695 const APInt *Size;
3696 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3697 match(CB->getArgOperand(1), m_APInt(Size)) &&
3698 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3699 };
3700 auto *CB = dyn_cast<CallBase>(AI);
3701 LibFunc TheLibFunc;
3702 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3703 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3704 !AlignmentAndSizeKnownValid(CB))
3705 return std::nullopt;
3706 Users.emplace_back(I);
3707 continue;
3708 }
3709
3710 case Instruction::Call:
3711 // Ignore no-op and store intrinsics.
3712 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3713 switch (II->getIntrinsicID()) {
3714 default:
3715 return std::nullopt;
3716
3717 case Intrinsic::memmove:
3718 case Intrinsic::memcpy:
3719 case Intrinsic::memset: {
3720 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3721 if (MI->isVolatile())
3722 return std::nullopt;
3723 // Note: this could also be ModRef, but we can still interpret that
3724 // as just Mod in that case.
3725 ModRefInfo NewAccess =
3726 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3727 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3728 return std::nullopt;
3729 Access |= NewAccess;
3730 [[fallthrough]];
3731 }
3732 case Intrinsic::assume:
3733 case Intrinsic::invariant_start:
3734 case Intrinsic::invariant_end:
3735 case Intrinsic::lifetime_start:
3736 case Intrinsic::lifetime_end:
3737 case Intrinsic::objectsize:
3738 Users.emplace_back(I);
3739 continue;
3740 case Intrinsic::launder_invariant_group:
3741 case Intrinsic::strip_invariant_group:
3742 Users.emplace_back(I);
3743 Worklist.push_back(I);
3744 continue;
3745 }
3746 }
3747
3748 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3749 getAllocationFamily(I, &TLI) == Family) {
3750 Users.emplace_back(I);
3751 continue;
3752 }
3753
3754 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3755 getAllocationFamily(I, &TLI) == Family) {
3756 Users.emplace_back(I);
3757 Worklist.push_back(I);
3758 continue;
3759 }
3760
3761 if (!isRefSet(Access) &&
3762 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3764 Users.emplace_back(I);
3765 continue;
3766 }
3767
3768 return std::nullopt;
3769
3770 case Instruction::Store: {
3771 StoreInst *SI = cast<StoreInst>(I);
3772 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3773 return std::nullopt;
3774 if (isRefSet(Access))
3775 return std::nullopt;
3776 Access |= ModRefInfo::Mod;
3777 Users.emplace_back(I);
3778 continue;
3779 }
3780
3781 case Instruction::Load: {
3782 LoadInst *LI = cast<LoadInst>(I);
3783 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3784 return std::nullopt;
3785 if (isModSet(Access))
3786 return std::nullopt;
3787 Access |= ModRefInfo::Ref;
3788 Users.emplace_back(I);
3789 continue;
3790 }
3791 }
3792 llvm_unreachable("missing a return?");
3793 }
3794 } while (!Worklist.empty());
3795
3797 return Access;
3798}
3799
Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3802
3803 // If we have a malloc call which is only used in any amount of comparisons to
3804 // null and free calls, delete the calls and replace the comparisons with true
3805 // or false as appropriate.
3806
3807 // This is based on the principle that we can substitute our own allocation
3808 // function (which will never return null) rather than knowledge of the
3809 // specific function being called. In some sense this can change the permitted
3810 // outputs of a program (when we convert a malloc to an alloca, the fact that
3811 // the allocation is now on the stack is potentially visible, for example),
3812 // but we believe it does so in a permissible manner.
3813 SmallVector<WeakTrackingVH, 64> Users;
3814
3815 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3816 // before each store.
3817 SmallVector<DbgVariableRecord *, 8> DVRs;
3818 std::unique_ptr<DIBuilder> DIB;
3819 if (isa<AllocaInst>(MI)) {
3820 findDbgUsers(&MI, DVRs);
3821 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3822 }
3823
3824 // Determine what getInitialValueOfAllocation would return without actually
3825 // allocating the result.
3826 bool KnowInitUndef = false;
3827 bool KnowInitZero = false;
3828 Constant *Init =
3830 if (Init) {
3831 if (isa<UndefValue>(Init))
3832 KnowInitUndef = true;
3833 else if (Init->isNullValue())
3834 KnowInitZero = true;
3835 }
3836 // The various sanitizers don't actually return undef memory, but rather
3837 // memory initialized with special forms of runtime poison
3838 auto &F = *MI.getFunction();
3839 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3840 F.hasFnAttribute(Attribute::SanitizeAddress))
3841 KnowInitUndef = false;
3842
3843 auto Removable =
3844 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3845 if (Removable) {
3846 for (WeakTrackingVH &User : Users) {
3847 // Lowering all @llvm.objectsize and MTI calls first because they may use
3848 // a bitcast/GEP of the alloca we are removing.
3849 if (!User)
3850 continue;
3851
3852 Instruction *I = cast<Instruction>(&*User);
3853
3854 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3855 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3856 SmallVector<Instruction *> InsertedInstructions;
3857 Value *Result = lowerObjectSizeCall(
3858 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3859 for (Instruction *Inserted : InsertedInstructions)
3860 Worklist.add(Inserted);
3861 replaceInstUsesWith(*I, Result);
3863 User = nullptr; // Skip examining in the next loop.
3864 continue;
3865 }
3866 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3867 if (KnowInitZero && isRefSet(*Removable)) {
3869 Builder.SetInsertPoint(MTI);
3870 auto *M = Builder.CreateMemSet(
3871 MTI->getRawDest(),
3872 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3873 MTI->getLength(), MTI->getDestAlign());
3874 M->copyMetadata(*MTI);
3875 }
3876 }
3877 }
3878 }
3879 for (WeakTrackingVH &User : Users) {
3880 if (!User)
3881 continue;
3882
3883 Instruction *I = cast<Instruction>(&*User);
3884
3885 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3886 replaceInstUsesWith(*C,
3887 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3888 C->isFalseWhenEqual()));
3889 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3890 for (auto *DVR : DVRs)
3891 if (DVR->isAddressOfVariable())
3892 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3893 } else {
3894 // Casts, GEP, or anything else: we're about to delete this instruction,
3895 // so it can not have any valid uses.
3896 Constant *Replace;
3897 if (isa<LoadInst>(I)) {
3898 assert(KnowInitZero || KnowInitUndef);
3899 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3900 : Constant::getNullValue(I->getType());
3901 } else
3902 Replace = PoisonValue::get(I->getType());
3903 replaceInstUsesWith(*I, Replace);
3904 }
3905 eraseInstFromFunction(*I);
3906 }
3907
3908 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3909 // Replace invoke with a NOP intrinsic to maintain the original CFG
3910 Module *M = II->getModule();
3911 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3912 auto *NewII = InvokeInst::Create(
3913 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3914 NewII->setDebugLoc(II->getDebugLoc());
3915 }
3916
3917 // Remove debug intrinsics which describe the value contained within the
3918 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3919 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3920 //
3921 // ```
3922 // define void @foo(i32 %0) {
3923 // %a = alloca i32 ; Deleted.
3924 // store i32 %0, i32* %a
3925 // dbg.value(i32 %0, "arg0") ; Not deleted.
3926 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3927 // call void @trivially_inlinable_no_op(i32* %a)
3928 // ret void
3929 // }
3930 // ```
3931 //
3932 // This may not be required if we stop describing the contents of allocas
3933 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3934 // the LowerDbgDeclare utility.
3935 //
3936 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3937 // "arg0" dbg.value may be stale after the call. However, failing to remove
3938 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3939 //
3940 // FIXME: the Assignment Tracking project has now likely made this
3941 // redundant (and it's sometimes harmful).
3942 for (auto *DVR : DVRs)
3943 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3944 DVR->eraseFromParent();
3945
3946 return eraseInstFromFunction(MI);
3947 }
3948 return nullptr;
3949}
3950
3951/// Move the call to free before a NULL test.
3952///
3953/// Check if this free is accessed after its argument has been tested
3954/// against NULL (property 0).
3955/// If yes, it is legal to move this call in its predecessor block.
3956///
3957/// The move is performed only if the block containing the call to free
3958/// will be removed, i.e.:
3959/// 1. it has only one predecessor P, and P has two successors
3960/// 2. it contains the call, noops, and an unconditional branch
3961/// 3. its successor is the same as its predecessor's successor
3962///
3963/// Profitability is not a concern here; this function should
3964/// be called only if the caller knows this transformation would be
3965/// profitable (e.g., for code size).
3966 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3967 const DataLayout &DL) {
3968 Value *Op = FI.getArgOperand(0);
3969 BasicBlock *FreeInstrBB = FI.getParent();
3970 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3971
3972 // Validate part of constraint #1: Only one predecessor
3973 // FIXME: We can extend the number of predecessors, but in that case, we
3974 // would duplicate the call to free in each predecessor and it may
3975 // not be profitable even for code size.
3976 if (!PredBB)
3977 return nullptr;
3978
3979 // Validate constraint #2: Does this block contain only the call to
3980 // free, noops, and an unconditional branch?
3981 BasicBlock *SuccBB;
3982 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3983 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3984 return nullptr;
3985
3986 // If there are only 2 instructions in the block, at this point they
3987 // are the call to free and the unconditional branch.
3988 // If there are more than 2 instructions, check that they are noops
3989 // i.e., they won't hurt the performance of the generated code.
3990 if (FreeInstrBB->size() != 2) {
3991 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3992 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3993 continue;
3994 auto *Cast = dyn_cast<CastInst>(&Inst);
3995 if (!Cast || !Cast->isNoopCast(DL))
3996 return nullptr;
3997 }
3998 }
3999 // Validate the rest of constraint #1 by matching on the pred branch.
4000 Instruction *TI = PredBB->getTerminator();
4001 BasicBlock *TrueBB, *FalseBB;
4002 CmpPredicate Pred;
4003 if (!match(TI, m_Br(m_ICmp(Pred,
4004 m_CombineOr(m_Specific(Op),
4005 m_Specific(Op->stripPointerCasts())),
4006 m_Zero()),
4007 TrueBB, FalseBB)))
4008 return nullptr;
4009 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
4010 return nullptr;
4011
4012 // Validate constraint #3: Ensure the null case just falls through.
4013 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
4014 return nullptr;
4015 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
4016 "Broken CFG: missing edge from predecessor to successor");
4017
4018 // At this point, we know that everything in FreeInstrBB can be moved
4019 // before TI.
4020 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
4021 if (&Instr == FreeInstrBBTerminator)
4022 break;
4023 Instr.moveBeforePreserving(TI->getIterator());
4024 }
4025 assert(FreeInstrBB->size() == 1 &&
4026 "Only the branch instruction should remain");
4027
4028 // Now that we've moved the call to free before the NULL check, we have to
4029 // remove any attributes on its parameter that imply it's non-null, because
4030 // those attributes might have only been valid because of the NULL check, and
4031 // we can get miscompiles if we keep them. This is conservative if non-null is
4032 // also implied by something other than the NULL check, but it's guaranteed to
4033 // be correct, and the conservativeness won't matter in practice, since the
4034 // attributes are irrelevant for the call to free itself and the pointer
4035 // shouldn't be used after the call.
4036 AttributeList Attrs = FI.getAttributes();
4037 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
4038 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4039 if (Dereferenceable.isValid()) {
4040 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
4041 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
4042 Attribute::Dereferenceable);
4043 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
4044 }
4045 FI.setAttributes(Attrs);
4046
4047 return &FI;
4048}
4049
4050 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
4051 // free undef -> unreachable.
4052 if (isa<UndefValue>(Op)) {
4053 // Leave a marker since we can't modify the CFG here.
4054 CreateNonTerminatorUnreachable(&FI);
4055 return eraseInstFromFunction(FI);
4056 }
4057
4058 // If we have 'free null' delete the instruction. This can happen in stl code
4059 // when lots of inlining happens.
4060 if (isa<ConstantPointerNull>(Op))
4061 return eraseInstFromFunction(FI);
4062
4063 // If we had free(realloc(...)) with no intervening uses, then eliminate the
4064 // realloc() entirely.
4065 CallInst *CI = dyn_cast<CallInst>(Op);
4066 if (CI && CI->hasOneUse())
4067 if (Value *ReallocatedOp = getReallocatedOperand(CI))
4068 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
4069
4070 // If we optimize for code size, try to move the call to free before the null
4071 // test so that simplify cfg can remove the empty block and dead code
4072 // elimination the branch. I.e., helps to turn something like:
4073 // if (foo) free(foo);
4074 // into
4075 // free(foo);
4076 //
4077 // Note that we can only do this for 'free' and not for any flavor of
4078 // 'operator delete'; there is no 'operator delete' symbol for which we are
4079 // permitted to invent a call, even if we're passing in a null pointer.
4080 if (MinimizeSize) {
4081 LibFunc Func;
4082 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
4083 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
4084 return I;
4085 }
4086
4087 return nullptr;
4088}
4089
4090 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
4091 Value *RetVal = RI.getReturnValue();
4092 if (!RetVal)
4093 return nullptr;
4094
4095 Function *F = RI.getFunction();
4096 Type *RetTy = RetVal->getType();
4097 if (RetTy->isPointerTy()) {
4098 bool HasDereferenceable =
4099 F->getAttributes().getRetDereferenceableBytes() > 0;
4100 if (F->hasRetAttribute(Attribute::NonNull) ||
4101 (HasDereferenceable &&
4102 !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
4103 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4104 return replaceOperand(RI, 0, V);
4105 }
4106 }
4107
4108 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4109 return nullptr;
4110
4111 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4112 if (ReturnClass == fcNone)
4113 return nullptr;
4114
4115 KnownFPClass KnownClass;
4116 if (SimplifyDemandedFPClass(&RI, 0, ~ReturnClass, KnownClass))
4117 return &RI;
4118
4119 return nullptr;
4120}
4121
4122// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
4123 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
4124 // Try to remove the previous instruction if it must lead to unreachable.
4125 // This includes instructions like stores and "llvm.assume" that may not get
4126 // removed by simple dead code elimination.
4127 bool Changed = false;
4128 while (Instruction *Prev = I.getPrevNode()) {
4129 // While we theoretically can erase EH, that would result in a block that
4130 // used to start with an EH no longer starting with EH, which is invalid.
4131 // To make it valid, we'd need to fix up predecessors to no longer refer to
4132 // this block, but that changes CFG, which is not allowed in InstCombine.
4133 if (Prev->isEHPad())
4134 break; // Can not drop any more instructions. We're done here.
4135
4136 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
4137 break; // Cannot drop any more instructions. We're done here.
4138 // Otherwise, this instruction can be freely erased,
4139 // even if it is not side-effect free.
4140
4141 // A value may still have uses before we process it here (for example, in
4142 // another unreachable block), so convert those to poison.
4143 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4144 eraseInstFromFunction(*Prev);
4145 Changed = true;
4146 }
4147 return Changed;
4148}
4149
4154
4155 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4156 assert(BI.isUnconditional() && "Only for unconditional branches.");
4157
4158 // If this store is the second-to-last instruction in the basic block
4159 // (excluding debug info) and if the block ends with
4160 // an unconditional branch, try to move the store to the successor block.
4161
4162 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4163 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4164 do {
4165 if (BBI != FirstInstr)
4166 --BBI;
4167 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4168
4169 return dyn_cast<StoreInst>(BBI);
4170 };
4171
4172 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4174 return &BI;
4175
4176 return nullptr;
4177}
4178
4181 if (!DeadEdges.insert({From, To}).second)
4182 return;
4183
4184 // Replace phi node operands in successor with poison.
4185 for (PHINode &PN : To->phis())
4186 for (Use &U : PN.incoming_values())
4187 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4188 replaceUse(U, PoisonValue::get(PN.getType()));
4189 addToWorklist(&PN);
4190 MadeIRChange = true;
4191 }
4192
4193 Worklist.push_back(To);
4194}
4195
4196// Under the assumption that I is unreachable, remove it and following
4197// instructions. Changes are reported directly to MadeIRChange.
4200 BasicBlock *BB = I->getParent();
4201 for (Instruction &Inst : make_early_inc_range(
4202 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4203 std::next(I->getReverseIterator())))) {
4204 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4205 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4206 MadeIRChange = true;
4207 }
4208 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4209 continue;
4210 // RemoveDIs: erase debug-info on this instruction manually.
4211 Inst.dropDbgRecords();
4213 MadeIRChange = true;
4214 }
4215
4218 MadeIRChange = true;
4219 for (Value *V : Changed)
4221 }
4222
4223 // Handle potentially dead successors.
4224 for (BasicBlock *Succ : successors(BB))
4225 addDeadEdge(BB, Succ, Worklist);
4226}
4227
4230 while (!Worklist.empty()) {
4231 BasicBlock *BB = Worklist.pop_back_val();
4232 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4233 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4234 }))
4235 continue;
4236
4238 }
4239}
4240
4241 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4242 BasicBlock *LiveSucc) {
4244 for (BasicBlock *Succ : successors(BB)) {
4245 // The live successor isn't dead.
4246 if (Succ == LiveSucc)
4247 continue;
4248
4249 addDeadEdge(BB, Succ, Worklist);
4250 }
4251
4253}
4254
4255 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4256 if (BI.isUnconditional())
4257 return visitUnconditionalBranchInst(BI);
4258
4259 // Change br (not X), label True, label False to: br X, label False, True
4260 Value *Cond = BI.getCondition();
4261 Value *X;
4262 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4263 // Swap Destinations and condition...
4264 BI.swapSuccessors();
4265 if (BPI)
4266 BPI->swapSuccEdgesProbabilities(BI.getParent());
4267 return replaceOperand(BI, 0, X);
4268 }
4269
4270 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4271 // This is done by inverting the condition and swapping successors:
4272 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4273 Value *Y;
4274 if (isa<SelectInst>(Cond) &&
4275 match(Cond,
4277 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4278 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4279 BI.swapSuccessors();
4280 if (BPI)
4281 BPI->swapSuccEdgesProbabilities(BI.getParent());
4282 return replaceOperand(BI, 0, Or);
4283 }
4284
4285 // If the condition is irrelevant, remove the use so that other
4286 // transforms on the condition become more effective.
4287 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4288 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4289
4290 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4291 CmpPredicate Pred;
4292 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4293 !isCanonicalPredicate(Pred)) {
4294 // Swap destinations and condition.
4295 auto *Cmp = cast<CmpInst>(Cond);
4296 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4297 BI.swapSuccessors();
4298 if (BPI)
4299 BPI->swapSuccEdgesProbabilities(BI.getParent());
4300 Worklist.push(Cmp);
4301 return &BI;
4302 }
4303
4304 if (isa<UndefValue>(Cond)) {
4305 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4306 return nullptr;
4307 }
4308 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4309 handlePotentiallyDeadSuccessors(BI.getParent(),
4310 BI.getSuccessor(!CI->getZExtValue()));
4311 return nullptr;
4312 }
4313
4314 // Replace all dominated uses of the condition with true/false
4315 // Ignore constant expressions to avoid iterating over uses on other
4316 // functions.
4317 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4318 for (auto &U : make_early_inc_range(Cond->uses())) {
4319 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4320 if (DT.dominates(Edge0, U)) {
4321 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4322 addToWorklist(cast<Instruction>(U.getUser()));
4323 continue;
4324 }
4325 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4326 if (DT.dominates(Edge1, U)) {
4327 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4328 addToWorklist(cast<Instruction>(U.getUser()));
4329 }
4330 }
4331 }
4332
4333 DC.registerBranch(&BI);
4334 return nullptr;
4335}
4336
4337// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4338// we can prove that both (switch C) and (switch X) go to the default when cond
4339// is false/true.
4342 bool IsTrueArm) {
4343 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4344 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4345 if (!C)
4346 return nullptr;
4347
4348 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4349 if (CstBB != SI.getDefaultDest())
4350 return nullptr;
4351 Value *X = Select->getOperand(3 - CstOpIdx);
4352 CmpPredicate Pred;
4353 const APInt *RHSC;
4354 if (!match(Select->getCondition(),
4355 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4356 return nullptr;
4357 if (IsTrueArm)
4358 Pred = ICmpInst::getInversePredicate(Pred);
4359
4360 // See whether we can replace the select with X
4361 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
4362 for (auto Case : SI.cases())
4363 if (!CR.contains(Case.getCaseValue()->getValue()))
4364 return nullptr;
4365
4366 return X;
4367}
4368
4369 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4370 Value *Cond = SI.getCondition();
4371 Value *Op0;
4372 const APInt *CondOpC;
4373 using InvertFn = std::function<APInt(const APInt &Case, const APInt &C)>;
4374
4375 auto MaybeInvertible = [&](Value *Cond) -> InvertFn {
4376 if (match(Cond, m_Add(m_Value(Op0), m_APInt(CondOpC))))
4377 // Change 'switch (X+C) case Case:' into 'switch (X) case Case-C'.
4378 return [](const APInt &Case, const APInt &C) { return Case - C; };
4379
4380 if (match(Cond, m_Sub(m_APInt(CondOpC), m_Value(Op0))))
4381 // Change 'switch (C-X) case Case:' into 'switch (X) case C-Case'.
4382 return [](const APInt &Case, const APInt &C) { return C - Case; };
4383
4384 if (match(Cond, m_Xor(m_Value(Op0), m_APInt(CondOpC))) &&
4385 !CondOpC->isMinSignedValue() && !CondOpC->isMaxSignedValue())
4386 // Change 'switch (X^C) case Case:' into 'switch (X) case Case^C'.
4387 // Prevent creation of large case values by excluding extremes.
4388 return [](const APInt &Case, const APInt &C) { return Case ^ C; };
4389
4390 return nullptr;
4391 };
4392
4393 // Attempt to invert and simplify the switch condition, as long as the
4394 // condition is not used further, as it may not be profitable otherwise.
4395 if (auto InvertFn = MaybeInvertible(Cond); InvertFn && Cond->hasOneUse()) {
4396 for (auto &Case : SI.cases()) {
4397 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4398 Case.setValue(ConstantInt::get(SI.getContext(), New));
4399 }
4400 return replaceOperand(SI, 0, Op0);
4401 }
4402
4403 uint64_t ShiftAmt;
4404 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4405 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4406 all_of(SI.cases(), [&](const auto &Case) {
4407 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4408 })) {
4409 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4411 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4412 Shl->hasOneUse()) {
4413 Value *NewCond = Op0;
4414 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4415 // If the shift may wrap, we need to mask off the shifted bits.
4416 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4417 NewCond = Builder.CreateAnd(
4418 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4419 }
4420 for (auto Case : SI.cases()) {
4421 const APInt &CaseVal = Case.getCaseValue()->getValue();
4422 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4423 : CaseVal.lshr(ShiftAmt);
4424 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4425 }
4426 return replaceOperand(SI, 0, NewCond);
4427 }
4428 }
4429
4430 // Fold switch(zext/sext(X)) into switch(X) if possible.
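// Illustrative sketch (hypothetical IR):
//   %w = zext i8 %x to i32
//   switch i32 %w, label %def [ i32 1, label %a
//                               i32 200, label %b ]
// every case value fits in 8 bits, so the switch can use %x directly:
//   switch i8 %x, label %def [ i8 1, label %a
//                              i8 200, label %b ]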
4431 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4432 bool IsZExt = isa<ZExtInst>(Cond);
4433 Type *SrcTy = Op0->getType();
4434 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4435
4436 if (all_of(SI.cases(), [&](const auto &Case) {
4437 const APInt &CaseVal = Case.getCaseValue()->getValue();
4438 return IsZExt ? CaseVal.isIntN(NewWidth)
4439 : CaseVal.isSignedIntN(NewWidth);
4440 })) {
4441 for (auto &Case : SI.cases()) {
4442 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4443 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4444 }
4445 return replaceOperand(SI, 0, Op0);
4446 }
4447 }
4448
4449 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4450 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4451 if (Value *V =
4452 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4453 return replaceOperand(SI, 0, V);
4454 if (Value *V =
4455 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4456 return replaceOperand(SI, 0, V);
4457 }
4458
4459 KnownBits Known = computeKnownBits(Cond, &SI);
4460 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4461 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4462
4463 // Compute the number of leading bits we can ignore.
4464 // TODO: A better way to determine this would use ComputeNumSignBits().
4465 for (const auto &C : SI.cases()) {
4466 LeadingKnownZeros =
4467 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4468 LeadingKnownOnes =
4469 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4470 }
4471
4472 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4473
4474 // Shrink the condition operand if the new type is smaller than the old type.
4475 // But do not shrink to a non-standard type, because backend can't generate
4476 // good code for that yet.
4477 // TODO: We can make it aggressive again after fixing PR39569.
4478 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4479 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4480 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4481 Builder.SetInsertPoint(&SI);
4482 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4483
4484 for (auto Case : SI.cases()) {
4485 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4486 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4487 }
4488 return replaceOperand(SI, 0, NewCond);
4489 }
4490
4491 if (isa<UndefValue>(Cond)) {
4492 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4493 return nullptr;
4494 }
4495 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4496 handlePotentiallyDeadSuccessors(SI.getParent(),
4497 SI.findCaseValue(CI)->getCaseSuccessor());
4498 return nullptr;
4499 }
4500
4501 return nullptr;
4502}
4503
4504 Instruction *
4505 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4506 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4507 if (!WO)
4508 return nullptr;
4509
4510 Intrinsic::ID OvID = WO->getIntrinsicID();
4511 const APInt *C = nullptr;
4512 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4513 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4514 OvID == Intrinsic::umul_with_overflow)) {
4515 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4516 if (C->isAllOnes())
4517 return BinaryOperator::CreateNeg(WO->getLHS());
4518 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4519 if (C->isPowerOf2()) {
4520 return BinaryOperator::CreateShl(
4521 WO->getLHS(),
4522 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4523 }
4524 }
4525 }
4526
4527 // We're extracting from an overflow intrinsic. See if we're the only user.
4528 // That allows us to simplify multiple result intrinsics to simpler things
4529 // that just get one value.
4530 if (!WO->hasOneUse())
4531 return nullptr;
4532
4533 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4534 // and replace it with a traditional binary instruction.
4535 if (*EV.idx_begin() == 0) {
4536 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4537 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4538 // Replace the old instruction's uses with poison.
4539 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4540 eraseInstFromFunction(*WO);
4541 return BinaryOperator::Create(BinOp, LHS, RHS);
4542 }
4543
4544 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4545
4546 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
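// Illustrative sketch (hypothetical IR):
//   %s = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
//   %o = extractvalue { i32, i1 } %s, 1
// becomes
//   %o = icmp ult i32 %x, %y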
4547 if (OvID == Intrinsic::usub_with_overflow)
4548 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4549
4550 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4551 // +1 is not possible because we assume signed values.
4552 if (OvID == Intrinsic::smul_with_overflow &&
4553 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4554 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4555
4556 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4557 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4558 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4559 // Only handle even bitwidths for performance reasons.
4560 if (BitWidth % 2 == 0)
4561 return new ICmpInst(
4562 ICmpInst::ICMP_UGT, WO->getLHS(),
4563 ConstantInt::get(WO->getLHS()->getType(),
4564 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4565 }
4566
4567 // If only the overflow result is used, and the right hand side is a
4568 // constant (or constant splat), we can remove the intrinsic by directly
4569 // checking for overflow.
4570 if (C) {
4571 // Compute the no-wrap range for LHS given RHS=C, then construct an
4572 // equivalent icmp, potentially using an offset.
4573 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4574 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4575
4576 CmpInst::Predicate Pred;
4577 APInt NewRHSC, Offset;
4578 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4579 auto *OpTy = WO->getRHS()->getType();
4580 auto *NewLHS = WO->getLHS();
4581 if (Offset != 0)
4582 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4583 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4584 ConstantInt::get(OpTy, NewRHSC));
4585 }
4586
4587 return nullptr;
4588}
4589
4592 InstCombiner::BuilderTy &Builder) {
4593 // Helper to fold frexp of select to select of frexp.
4594
4595 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4596 return nullptr;
4597 Value *Cond = SelectInst->getCondition();
4598 Value *TrueVal = SelectInst->getTrueValue();
4599 Value *FalseVal = SelectInst->getFalseValue();
4600
4601 const APFloat *ConstVal = nullptr;
4602 Value *VarOp = nullptr;
4603 bool ConstIsTrue = false;
4604
4605 if (match(TrueVal, m_APFloat(ConstVal))) {
4606 VarOp = FalseVal;
4607 ConstIsTrue = true;
4608 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4609 VarOp = TrueVal;
4610 ConstIsTrue = false;
4611 } else {
4612 return nullptr;
4613 }
4614
4615 Builder.SetInsertPoint(&EV);
4616
4617 CallInst *NewFrexp =
4618 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4619 NewFrexp->copyIRFlags(FrexpCall);
4620
4621 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4622
4623 int Exp;
4624 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4625
4626 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4627
4628 Value *NewSel = Builder.CreateSelectFMF(
4629 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4630 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4631 return NewSel;
4632}
4633 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4634 Value *Agg = EV.getAggregateOperand();
4635
4636 if (!EV.hasIndices())
4637 return replaceInstUsesWith(EV, Agg);
4638
4639 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4640 SQ.getWithInstruction(&EV)))
4641 return replaceInstUsesWith(EV, V);
4642
4643 Value *Cond, *TrueVal, *FalseVal;
4645 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4646 auto *SelInst =
4647 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4648 if (Value *Result =
4649 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4650 return replaceInstUsesWith(EV, Result);
4651 }
4652 if (auto *IV = dyn_cast<InsertValueInst>(Agg)) {
4653 // We're extracting from an insertvalue instruction, compare the indices
4654 const unsigned *exti, *exte, *insi, *inse;
4655 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4656 exte = EV.idx_end(), inse = IV->idx_end();
4657 exti != exte && insi != inse;
4658 ++exti, ++insi) {
4659 if (*insi != *exti)
4660 // The insert and extract both reference distinctly different elements.
4661 // This means the extract is not influenced by the insert, and we can
4662 // replace the aggregate operand of the extract with the aggregate
4663 // operand of the insert. i.e., replace
4664 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4665 // %E = extractvalue { i32, { i32 } } %I, 0
4666 // with
4667 // %E = extractvalue { i32, { i32 } } %A, 0
4668 return ExtractValueInst::Create(IV->getAggregateOperand(),
4669 EV.getIndices());
4670 }
4671 if (exti == exte && insi == inse)
4672 // Both iterators are at the end: Index lists are identical. Replace
4673 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4674 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4675 // with "i32 42"
4676 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4677 if (exti == exte) {
4678 // The extract list is a prefix of the insert list. i.e. replace
4679 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4680 // %E = extractvalue { i32, { i32 } } %I, 1
4681 // with
4682 // %X = extractvalue { i32, { i32 } } %A, 1
4683 // %E = insertvalue { i32 } %X, i32 42, 0
4684 // by switching the order of the insert and extract (though the
4685 // insertvalue should be left in, since it may have other uses).
4686 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4687 EV.getIndices());
4688 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4689 ArrayRef(insi, inse));
4690 }
4691 if (insi == inse)
4692 // The insert list is a prefix of the extract list
4693 // We can simply remove the common indices from the extract and make it
4694 // operate on the inserted value instead of the insertvalue result.
4695 // i.e., replace
4696 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4697 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4698 // with
4699 // %E extractvalue { i32 } { i32 42 }, 0
4700 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4701 ArrayRef(exti, exte));
4702 }
4703
4704 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4705 return R;
4706
4707 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4708 // Bail out if the aggregate contains scalable vector type
4709 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4710 STy && STy->isScalableTy())
4711 return nullptr;
4712
4713 // If the (non-volatile) load only has one use, we can rewrite this to a
4714 // load from a GEP. This reduces the size of the load. If a load is used
4715 // only by extractvalue instructions then this either must have been
4716 // optimized before, or it is a struct with padding, in which case we
4717 // don't want to do the transformation as it loses padding knowledge.
4718 if (L->isSimple() && L->hasOneUse()) {
4719 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4720 SmallVector<Value*, 4> Indices;
4721 // Prefix an i32 0 since we need the first element.
4722 Indices.push_back(Builder.getInt32(0));
4723 for (unsigned Idx : EV.indices())
4724 Indices.push_back(Builder.getInt32(Idx));
4725
4726 // We need to insert these at the location of the old load, not at that of
4727 // the extractvalue.
4728 Builder.SetInsertPoint(L);
4729 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4730 L->getPointerOperand(), Indices);
4731 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4732 // Whatever aliasing information we had for the original load must also
4733 // hold for the smaller load, so propagate the annotations.
4734 NL->setAAMetadata(L->getAAMetadata());
4735 // Returning the load directly will cause the main loop to insert it in
4736 // the wrong spot, so use replaceInstUsesWith().
4737 return replaceInstUsesWith(EV, NL);
4738 }
4739 }
4740
4741 if (auto *PN = dyn_cast<PHINode>(Agg))
4742 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4743 return Res;
4744
4745 // Canonicalize extract (select Cond, TV, FV)
4746 // -> select cond, (extract TV), (extract FV)
4747 if (auto *SI = dyn_cast<SelectInst>(Agg))
4748 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4749 return R;
4750
4751 // We could simplify extracts from other values. Note that nested extracts may
4752 // already be simplified implicitly by the above: extract (extract (insert) )
4753 // will be translated into extract ( insert ( extract ) ) first and then just
4754 // the value inserted, if appropriate. Similarly for extracts from single-use
4755 // loads: extract (extract (load)) will be translated to extract (load (gep))
4756 // and if again single-use then via load (gep (gep)) to load (gep).
4757 // However, double extracts from e.g. function arguments or return values
4758 // aren't handled yet.
4759 return nullptr;
4760}
4761
4762/// Return 'true' if the given typeinfo will match anything.
4763static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4764 switch (Personality) {
4768 // The GCC C EH and Rust personalities only exist to support cleanups, so
4769 // it's not clear what the semantics of catch clauses are.
4770 return false;
4772 return false;
4774 // While __gnat_all_others_value will match any Ada exception, it doesn't
4775 // match foreign exceptions (or didn't, before gcc-4.7).
4776 return false;
4787 return TypeInfo->isNullValue();
4788 }
4789 llvm_unreachable("invalid enum");
4790}
4791
4792static bool shorter_filter(const Value *LHS, const Value *RHS) {
4793 return
4794 cast<ArrayType>(LHS->getType())->getNumElements()
4795 <
4796 cast<ArrayType>(RHS->getType())->getNumElements();
4797}
4798
4799 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4800 // The logic here should be correct for any real-world personality function.
4801 // However if that turns out not to be true, the offending logic can always
4802 // be conditioned on the personality function, like the catch-all logic is.
4803 EHPersonality Personality =
4804 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4805
4806 // Simplify the list of clauses, eg by removing repeated catch clauses
4807 // (these are often created by inlining).
4808 bool MakeNewInstruction = false; // If true, recreate using the following:
4809 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4810 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4811
4812 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4813 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4814 bool isLastClause = i + 1 == e;
4815 if (LI.isCatch(i)) {
4816 // A catch clause.
4817 Constant *CatchClause = LI.getClause(i);
4818 Constant *TypeInfo = CatchClause->stripPointerCasts();
4819
4820 // If we already saw this clause, there is no point in having a second
4821 // copy of it.
4822 if (AlreadyCaught.insert(TypeInfo).second) {
4823 // This catch clause was not already seen.
4824 NewClauses.push_back(CatchClause);
4825 } else {
4826 // Repeated catch clause - drop the redundant copy.
4827 MakeNewInstruction = true;
4828 }
4829
4830 // If this is a catch-all then there is no point in keeping any following
4831 // clauses or marking the landingpad as having a cleanup.
4832 if (isCatchAll(Personality, TypeInfo)) {
4833 if (!isLastClause)
4834 MakeNewInstruction = true;
4835 CleanupFlag = false;
4836 break;
4837 }
4838 } else {
4839 // A filter clause. If any of the filter elements were already caught
4840 // then they can be dropped from the filter. It is tempting to try to
4841 // exploit the filter further by saying that any typeinfo that does not
4842 // occur in the filter can't be caught later (and thus can be dropped).
4843 // However this would be wrong, since typeinfos can match without being
4844 // equal (for example if one represents a C++ class, and the other some
4845 // class derived from it).
4846 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4847 Constant *FilterClause = LI.getClause(i);
4848 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4849 unsigned NumTypeInfos = FilterType->getNumElements();
4850
4851 // An empty filter catches everything, so there is no point in keeping any
4852 // following clauses or marking the landingpad as having a cleanup. By
4853 // dealing with this case here the following code is made a bit simpler.
4854 if (!NumTypeInfos) {
4855 NewClauses.push_back(FilterClause);
4856 if (!isLastClause)
4857 MakeNewInstruction = true;
4858 CleanupFlag = false;
4859 break;
4860 }
4861
4862 bool MakeNewFilter = false; // If true, make a new filter.
4863 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4864 if (isa<ConstantAggregateZero>(FilterClause)) {
4865 // Not an empty filter - it contains at least one null typeinfo.
4866 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4867 Constant *TypeInfo =
4869 // If this typeinfo is a catch-all then the filter can never match.
4870 if (isCatchAll(Personality, TypeInfo)) {
4871 // Throw the filter away.
4872 MakeNewInstruction = true;
4873 continue;
4874 }
4875
4876 // There is no point in having multiple copies of this typeinfo, so
4877 // discard all but the first copy if there is more than one.
4878 NewFilterElts.push_back(TypeInfo);
4879 if (NumTypeInfos > 1)
4880 MakeNewFilter = true;
4881 } else {
4882 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4883 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4884 NewFilterElts.reserve(NumTypeInfos);
4885
4886 // Remove any filter elements that were already caught or that already
4887 // occurred in the filter. While there, see if any of the elements are
4888 // catch-alls. If so, the filter can be discarded.
4889 bool SawCatchAll = false;
4890 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4891 Constant *Elt = Filter->getOperand(j);
4892 Constant *TypeInfo = Elt->stripPointerCasts();
4893 if (isCatchAll(Personality, TypeInfo)) {
4894 // This element is a catch-all. Bail out, noting this fact.
4895 SawCatchAll = true;
4896 break;
4897 }
4898
4899 // Even if we've seen a type in a catch clause, we don't want to
4900 // remove it from the filter. An unexpected type handler may be
4901 // set up for a call site which throws an exception of the same
4902 // type caught. In order for the exception thrown by the unexpected
4903 // handler to propagate correctly, the filter must be correctly
4904 // described for the call site.
4905 //
4906 // Example:
4907 //
4908 // void unexpected() { throw 1;}
4909 // void foo() throw (int) {
4910 // std::set_unexpected(unexpected);
4911 // try {
4912 // throw 2.0;
4913 // } catch (int i) {}
4914 // }
4915
4916 // There is no point in having multiple copies of the same typeinfo in
4917 // a filter, so only add it if we didn't already.
4918 if (SeenInFilter.insert(TypeInfo).second)
4919 NewFilterElts.push_back(cast<Constant>(Elt));
4920 }
4921 // A filter containing a catch-all cannot match anything by definition.
4922 if (SawCatchAll) {
4923 // Throw the filter away.
4924 MakeNewInstruction = true;
4925 continue;
4926 }
4927
4928 // If we dropped something from the filter, make a new one.
4929 if (NewFilterElts.size() < NumTypeInfos)
4930 MakeNewFilter = true;
4931 }
4932 if (MakeNewFilter) {
4933 FilterType = ArrayType::get(FilterType->getElementType(),
4934 NewFilterElts.size());
4935 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4936 MakeNewInstruction = true;
4937 }
4938
4939 NewClauses.push_back(FilterClause);
4940
4941 // If the new filter is empty then it will catch everything so there is
4942 // no point in keeping any following clauses or marking the landingpad
4943 // as having a cleanup. The case of the original filter being empty was
4944 // already handled above.
4945 if (MakeNewFilter && !NewFilterElts.size()) {
4946 assert(MakeNewInstruction && "New filter but not a new instruction!");
4947 CleanupFlag = false;
4948 break;
4949 }
4950 }
4951 }
4952
4953 // If several filters occur in a row then reorder them so that the shortest
4954 // filters come first (those with the smallest number of elements). This is
4955 // advantageous because shorter filters are more likely to match, speeding up
4956 // unwinding, but mostly because it increases the effectiveness of the other
4957 // filter optimizations below.
4958 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4959 unsigned j;
4960 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4961 for (j = i; j != e; ++j)
4962 if (!isa<ArrayType>(NewClauses[j]->getType()))
4963 break;
4964
4965 // Check whether the filters are already sorted by length. We need to know
4966 // if sorting them is actually going to do anything so that we only make a
4967 // new landingpad instruction if it does.
4968 for (unsigned k = i; k + 1 < j; ++k)
4969 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4970 // Not sorted, so sort the filters now. Doing an unstable sort would be
4971 // correct too but reordering filters pointlessly might confuse users.
4972 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4974 MakeNewInstruction = true;
4975 break;
4976 }
4977
4978 // Look for the next batch of filters.
4979 i = j + 1;
4980 }
4981
4982 // If typeinfos matched if and only if equal, then the elements of a filter L
4983 // that occurs later than a filter F could be replaced by the intersection of
4984 // the elements of F and L. In reality two typeinfos can match without being
4985 // equal (for example if one represents a C++ class, and the other some class
4986 // derived from it) so it would be wrong to perform this transform in general.
4987 // However the transform is correct and useful if F is a subset of L. In that
4988 // case L can be replaced by F, and thus removed altogether since repeating a
4989 // filter is pointless. So here we look at all pairs of filters F and L where
4990 // L follows F in the list of clauses, and remove L if every element of F is
4991 // an element of L. This can occur when inlining C++ functions with exception
4992 // specifications.
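 // Illustrative sketch (not part of the original source, @typeid.A/@typeid.B
 // are hypothetical globals): given the clauses
 //   filter [1 x ptr] [ptr @typeid.A]                  ; F
 //   filter [2 x ptr] [ptr @typeid.A, ptr @typeid.B]   ; L
 // every element of F is also an element of L, so L is redundant and is
 // erased by the loop below.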
4993 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4994 // Examine each filter in turn.
4995 Value *Filter = NewClauses[i];
4996 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4997 if (!FTy)
4998 // Not a filter - skip it.
4999 continue;
5000 unsigned FElts = FTy->getNumElements();
5001 // Examine each filter following this one. Doing this backwards means that
5002 // we don't have to worry about filters disappearing under us when removed.
5003 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
5004 Value *LFilter = NewClauses[j];
5005 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
5006 if (!LTy)
5007 // Not a filter - skip it.
5008 continue;
5009 // If Filter is a subset of LFilter, i.e. every element of Filter is also
5010 // an element of LFilter, then discard LFilter.
5011 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
5012 // If Filter is empty then it is a subset of LFilter.
5013 if (!FElts) {
5014 // Discard LFilter.
5015 NewClauses.erase(J);
5016 MakeNewInstruction = true;
5017 // Move on to the next filter.
5018 continue;
5019 }
5020 unsigned LElts = LTy->getNumElements();
5021 // If Filter is longer than LFilter then it cannot be a subset of it.
5022 if (FElts > LElts)
5023 // Move on to the next filter.
5024 continue;
5025 // At this point we know that LFilter has at least one element.
5026 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
5027 // Filter is a subset of LFilter iff Filter contains only zeros (as we
5028 // already know that Filter is not longer than LFilter).
5030 assert(FElts <= LElts && "Should have handled this case earlier!");
5031 // Discard LFilter.
5032 NewClauses.erase(J);
5033 MakeNewInstruction = true;
5034 }
5035 // Move on to the next filter.
5036 continue;
5037 }
5038 ConstantArray *LArray = cast<ConstantArray>(LFilter);
5039 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
5040 // Since Filter is non-empty and contains only zeros, it is a subset of
5041 // LFilter iff LFilter contains a zero.
5042 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
5043 for (unsigned l = 0; l != LElts; ++l)
5044 if (LArray->getOperand(l)->isNullValue()) {
5045 // LFilter contains a zero - discard it.
5046 NewClauses.erase(J);
5047 MakeNewInstruction = true;
5048 break;
5049 }
5050 // Move on to the next filter.
5051 continue;
5052 }
5053 // At this point we know that both filters are ConstantArrays. Loop over
5054 // operands to see whether every element of Filter is also an element of
5055 // LFilter. Since filters tend to be short this is probably faster than
5056 // using a method that scales nicely.
5057 ConstantArray *FArray = cast<ConstantArray>(Filter);
5058 bool AllFound = true;
5059 for (unsigned f = 0; f != FElts; ++f) {
5060 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
5061 AllFound = false;
5062 for (unsigned l = 0; l != LElts; ++l) {
5063 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
5064 if (LTypeInfo == FTypeInfo) {
5065 AllFound = true;
5066 break;
5067 }
5068 }
5069 if (!AllFound)
5070 break;
5071 }
5072 if (AllFound) {
5073 // Discard LFilter.
5074 NewClauses.erase(J);
5075 MakeNewInstruction = true;
5076 }
5077 // Move on to the next filter.
5078 }
5079 }
5080
5081 // If we changed any of the clauses, replace the old landingpad instruction
5082 // with a new one.
5083 if (MakeNewInstruction) {
5085 NewClauses.size());
5086 for (Constant *C : NewClauses)
5087 NLI->addClause(C);
5088 // A landing pad with no clauses must have the cleanup flag set. It is
5089 // theoretically possible, though highly unlikely, that we eliminated all
5090 // clauses. If so, force the cleanup flag to true.
5091 if (NewClauses.empty())
5092 CleanupFlag = true;
5093 NLI->setCleanup(CleanupFlag);
5094 return NLI;
5095 }
5096
5097 // Even if none of the clauses changed, we may nonetheless have understood
5098 // that the cleanup flag is pointless. Clear it if so.
5099 if (LI.isCleanup() != CleanupFlag) {
5100 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5101 LI.setCleanup(CleanupFlag);
5102 return &LI;
5103 }
5104
5105 return nullptr;
5106}
5107
5108Value *
5110 // Try to push freeze through instructions that propagate but don't produce
5111 // poison as far as possible. If an operand of freeze does not produce poison
5112 // then push the freeze through to the operands that are not guaranteed
5113 // non-poison. The actual transform is as follows.
5114 // Op1 = ... ; Op1 can be poison
5115 // Op0 = Inst(Op1, NonPoisonOps...)
5116 // ... = Freeze(Op0)
5117 // =>
5118 // Op1 = ...
5119 // Op1.fr = Freeze(Op1)
5120 // ... = Inst(Op1.fr, NonPoisonOps...)
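 // A concrete instance (illustrative, not part of the original source):
 //   %a = add nuw i32 %x, 1   ; %x may be poison; nuw only adds flag poison
 //   %f = freeze i32 %a
 // becomes
 //   %x.fr = freeze i32 %x
 //   %a    = add i32 %x.fr, 1 ; poison-generating flags are dropped
 // and %f is replaced by %a.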
5121
5122 auto CanPushFreeze = [](Value *V) {
5123 if (!isa<Instruction>(V) || isa<PHINode>(V))
5124 return false;
5125
5126 // We can't push the freeze through an instruction which can itself create
5127 // poison. If the only source of new poison is flags, we can simply
5128 // strip them (since we know the only use is the freeze and nothing can
5129 // benefit from them.)
5131 /*ConsiderFlagsAndMetadata*/ false);
5132 };
5133
5134 // Pushing freezes up long instruction chains can be expensive. Instead,
5135 // we directly push the freeze all the way to the leaves. However, we leave
5136 // deduplication of freezes on the same value for freezeOtherUses().
5137 Use *OrigUse = &OrigFI.getOperandUse(0);
5140 Worklist.push_back(OrigUse);
5141 while (!Worklist.empty()) {
5142 auto *U = Worklist.pop_back_val();
5143 Value *V = U->get();
5144 if (!CanPushFreeze(V)) {
5145 // If we can't push through the original instruction, abort the transform.
5146 if (U == OrigUse)
5147 return nullptr;
5148
5149 auto *UserI = cast<Instruction>(U->getUser());
5150 Builder.SetInsertPoint(UserI);
5151 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5152 U->set(Frozen);
5153 continue;
5154 }
5155
5156 auto *I = cast<Instruction>(V);
5157 if (!Visited.insert(I).second)
5158 continue;
5159
5160 // reverse() to emit freezes in a more natural order.
5161 for (Use &Op : reverse(I->operands())) {
5162 Value *OpV = Op.get();
5164 continue;
5165 Worklist.push_back(&Op);
5166 }
5167
5168 I->dropPoisonGeneratingAnnotations();
5169 this->Worklist.add(I);
5170 }
5171
5172 return OrigUse->get();
5173}
5174
5176 PHINode *PN) {
5177 // Detect whether this is a recurrence with a start value and some number of
5178 // backedge values. We'll check whether we can push the freeze through the
5179 // backedge values (possibly dropping poison flags along the way) until we
5180 // reach the phi again. In that case, we can move the freeze to the start
5181 // value.
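 // Illustrative sketch (not part of the original source):
 //   loop:
 //     %iv      = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
 //     %iv.fr   = freeze i32 %iv
 //     %iv.next = add nsw i32 %iv, 1
 // can become
 //   entry:
 //     %start.fr = freeze i32 %start
 //   loop:
 //     %iv      = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
 //     %iv.next = add i32 %iv, 1   ; nsw dropped along the backedge chain
 // with all uses of %iv.fr replaced by %iv.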
5182 Use *StartU = nullptr;
5184 for (Use &U : PN->incoming_values()) {
5185 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5186 // Add backedge value to worklist.
5187 Worklist.push_back(U.get());
5188 continue;
5189 }
5190
5191 // Don't bother handling multiple start values.
5192 if (StartU)
5193 return nullptr;
5194 StartU = &U;
5195 }
5196
5197 if (!StartU || Worklist.empty())
5198 return nullptr; // Not a recurrence.
5199
5200 Value *StartV = StartU->get();
5201 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5202 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5203 // We can't insert freeze if the start value is the result of the
5204 // terminator (e.g. an invoke).
5205 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5206 return nullptr;
5207
5210 while (!Worklist.empty()) {
5211 Value *V = Worklist.pop_back_val();
5212 if (!Visited.insert(V).second)
5213 continue;
5214
5215 if (Visited.size() > 32)
5216 return nullptr; // Limit the total number of values we inspect.
5217
5218 // Assume that PN is non-poison, because it will be after the transform.
5219 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5220 continue;
5221
5224 /*ConsiderFlagsAndMetadata*/ false))
5225 return nullptr;
5226
5227 DropFlags.push_back(I);
5228 append_range(Worklist, I->operands());
5229 }
5230
5231 for (Instruction *I : DropFlags)
5232 I->dropPoisonGeneratingAnnotations();
5233
5234 if (StartNeedsFreeze) {
5235 Builder.SetInsertPoint(StartBB->getTerminator());
5236 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5237 StartV->getName() + ".fr");
5238 replaceUse(*StartU, FrozenStartV);
5239 }
5240 return replaceInstUsesWith(FI, PN);
5241}
5242
5243bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5244 Value *Op = FI.getOperand(0);
5245
5246 if (isa<Constant>(Op) || Op->hasOneUse())
5247 return false;
5248
5249 // Move the freeze directly after the definition of its operand, so that
5250 // it dominates the maximum number of uses. Note that it may not dominate
5251 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5252 // the normal/default destination. This is why the domination check in the
5253 // replacement below is still necessary.
5254 BasicBlock::iterator MoveBefore;
5255 if (isa<Argument>(Op)) {
5256 MoveBefore =
5258 } else {
5259 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5260 if (!MoveBeforeOpt)
5261 return false;
5262 MoveBefore = *MoveBeforeOpt;
5263 }
5264
5265 // Re-point iterator to come after any debug-info records.
5266 MoveBefore.setHeadBit(false);
5267
5268 bool Changed = false;
5269 if (&FI != &*MoveBefore) {
5270 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5271 Changed = true;
5272 }
5273
5274 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5275 bool Dominates = DT.dominates(&FI, U);
5276 Changed |= Dominates;
5277 return Dominates;
5278 });
5279
5280 return Changed;
5281}
5282
5283// Check if any direct or bitcast user of this value is a shuffle instruction.
5285 for (auto *U : V->users()) {
5287 return true;
5288 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5289 return true;
5290 }
5291 return false;
5292}
5293
5295 Value *Op0 = I.getOperand(0);
5296
5297 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5298 return replaceInstUsesWith(I, V);
5299
5300 // freeze (phi const, x) --> phi const, (freeze x)
5301 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5302 if (Instruction *NV = foldOpIntoPhi(I, PN))
5303 return NV;
5304 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5305 return NV;
5306 }
5307
5309 return replaceInstUsesWith(I, NI);
5310
5311 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5312 // - or: pick -1
5313 // - select's condition: if the true value is constant, choose it by making
5314 // the condition true.
5315 // - phi: pick the common constant across operands
5316 // - default: pick 0
5317 //
5318 // Note that this transform is intentionally done here rather than
5319 // via an analysis in InstSimplify or at individual user sites. That is
5320 // because we must produce the same value for all uses of the freeze -
5321 // it's the reason "freeze" exists!
5322 //
5323 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5324 // duplicating logic for binops at least.
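 // Illustrative sketch (not part of the original source): for
 //   %f = freeze i32 undef
 //   %o = or i32 %f, %x
 // the rule above picks -1 for %f, so the 'or' simplifies to -1. If the same
 // freeze also feeds a user that prefers a different constant, the fallback of
 // 0 is used so every use still observes one consistent value.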
5325 auto getUndefReplacement = [&](Type *Ty) {
5326 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5327 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5328 // removed.
5329 Constant *BestValue = nullptr;
5330 for (Value *V : PN.incoming_values()) {
5331 if (match(V, m_Freeze(m_Undef())))
5332 continue;
5333
5335 if (!C)
5336 return nullptr;
5337
5339 return nullptr;
5340
5341 if (BestValue && BestValue != C)
5342 return nullptr;
5343
5344 BestValue = C;
5345 }
5346 return BestValue;
5347 };
5348
5349 Value *NullValue = Constant::getNullValue(Ty);
5350 Value *BestValue = nullptr;
5351 for (auto *U : I.users()) {
5352 Value *V = NullValue;
5353 if (match(U, m_Or(m_Value(), m_Value())))
5355 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5356 V = ConstantInt::getTrue(Ty);
5357 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5358 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5359 V = NullValue;
5360 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5361 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5362 V = MaybeV;
5363 }
5364
5365 if (!BestValue)
5366 BestValue = V;
5367 else if (BestValue != V)
5368 BestValue = NullValue;
5369 }
5370 assert(BestValue && "Must have at least one use");
5371 assert(BestValue != &I && "Cannot replace with itself");
5372 return BestValue;
5373 };
5374
5375 if (match(Op0, m_Undef())) {
5376 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5377 // a shuffle. This may improve codegen for shuffles that allow
5378 // unspecified inputs.
5380 return nullptr;
5381 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5382 }
5383
5384 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5385 Type *Ty = C->getType();
5386 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5387 if (!VTy)
5388 return nullptr;
5389 unsigned NumElts = VTy->getNumElements();
5390 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5391 for (unsigned i = 0; i != NumElts; ++i) {
5392 Constant *EltC = C->getAggregateElement(i);
5393 if (EltC && !match(EltC, m_Undef())) {
5394 BestValue = EltC;
5395 break;
5396 }
5397 }
5398 return Constant::replaceUndefsWith(C, BestValue);
5399 };
5400
5401 Constant *C;
5402 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5403 !C->containsConstantExpression()) {
5404 if (Constant *Repl = getFreezeVectorReplacement(C))
5405 return replaceInstUsesWith(I, Repl);
5406 }
5407
5408 // Replace uses of Op with freeze(Op).
5409 if (freezeOtherUses(I))
5410 return &I;
5411
5412 return nullptr;
5413}
5414
5415/// Check for case where the call writes to an otherwise dead alloca. This
5416/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5417/// helper *only* analyzes the write; it doesn't check any other legality aspect.
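// Illustrative C++ shape this targets (hypothetical example, not from the
// original source):
//   int unused;          // otherwise-dead local backed by an alloca
//   getValue(&unused);   // call writes only to the dead out-param
// If the alloca has no other readers, the call's write is never observed, so
// the write alone does not block sinking the call.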
5418static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5419 auto *CB = dyn_cast<CallBase>(I);
5420 if (!CB)
5421 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5422 // to allow reload along used path as described below. Otherwise, this
5423 // is simply a store to a dead allocation which will be removed.
5424 return false;
5425 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5426 if (!Dest)
5427 return false;
5428 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5429 if (!AI)
5430 // TODO: allow malloc?
5431 return false;
5432 // TODO: allow memory access dominated by move point? Note that since AI
5433 // could have a reference to itself captured by the call, we would need to
5434 // account for cycles in doing so.
5435 SmallVector<const User *> AllocaUsers;
5437 auto pushUsers = [&](const Instruction &I) {
5438 for (const User *U : I.users()) {
5439 if (Visited.insert(U).second)
5440 AllocaUsers.push_back(U);
5441 }
5442 };
5443 pushUsers(*AI);
5444 while (!AllocaUsers.empty()) {
5445 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5446 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5447 pushUsers(*UserI);
5448 continue;
5449 }
5450 if (UserI == CB)
5451 continue;
5452 // TODO: support lifetime.start/end here
5453 return false;
5454 }
5455 return true;
5456}
5457
5458/// Try to move the specified instruction from its current block into the
5459/// beginning of DestBlock, which can only happen if it's safe to move the
5460/// instruction past all of the instructions between it and the end of its
5461/// block.
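// Illustrative sketch (not part of the original source): if %v is defined in
// %bb but only used in %bb.then, whose unique predecessor is %bb, e.g.
//   bb:
//     %v = mul i32 %a, %b
//     br i1 %c, label %bb.then, label %bb.else
// then %v can be moved to the front of %bb.then, so it is not executed when
// control flows to %bb.else.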
5462bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5463 BasicBlock *DestBlock) {
5464 BasicBlock *SrcBlock = I->getParent();
5465
5466 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5467 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5468 I->isTerminator())
5469 return false;
5470
5471 // Do not sink static or dynamic alloca instructions. Static allocas must
5472 // remain in the entry block, and dynamic allocas must not be sunk in between
5473 // a stacksave / stackrestore pair, which would incorrectly shorten its
5474 // lifetime.
5475 if (isa<AllocaInst>(I))
5476 return false;
5477
5478 // Do not sink into catchswitch blocks.
5479 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5480 return false;
5481
5482 // Do not sink convergent call instructions.
5483 if (auto *CI = dyn_cast<CallInst>(I)) {
5484 if (CI->isConvergent())
5485 return false;
5486 }
5487
5488 // Unless we can prove that the memory write isn't visible except on the
5489 // path we're sinking to, we must bail.
5490 if (I->mayWriteToMemory()) {
5491 if (!SoleWriteToDeadLocal(I, TLI))
5492 return false;
5493 }
5494
5495 // We can only sink load instructions if there is nothing between the load and
5496 // the end of block that could change the value.
5497 if (I->mayReadFromMemory() &&
5498 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5499 // We don't want to do any sophisticated alias analysis, so we only check
5500 // the instructions after I in I's parent block if we try to sink to its
5501 // successor block.
5502 if (DestBlock->getUniquePredecessor() != I->getParent())
5503 return false;
5504 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5505 E = I->getParent()->end();
5506 Scan != E; ++Scan)
5507 if (Scan->mayWriteToMemory())
5508 return false;
5509 }
5510
5511 I->dropDroppableUses([&](const Use *U) {
5512 auto *I = dyn_cast<Instruction>(U->getUser());
5513 if (I && I->getParent() != DestBlock) {
5514 Worklist.add(I);
5515 return true;
5516 }
5517 return false;
5518 });
5519 /// FIXME: We could remove droppable uses that are not dominated by
5520 /// the new position.
5521
5522 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5523 I->moveBefore(*DestBlock, InsertPos);
5524 ++NumSunkInst;
5525
5526 // Also sink all related debug uses from the source basic block. Otherwise we
5527 // get debug use before the def. Attempt to salvage debug uses first, to
5528 // maximise the range variables have location for. If we cannot salvage, then
5529 // mark the location undef: we know it was supposed to receive a new location
5530 // here, but that computation has been sunk.
5531 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5532 findDbgUsers(I, DbgVariableRecords);
5533 if (!DbgVariableRecords.empty())
5534 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5535 DbgVariableRecords);
5536
5537 // PS: there are numerous flaws with this behaviour, not least that right now
5538 // assignments can be re-ordered past other assignments to the same variable
5539 // if they use different Values. Creating more undef assignments can never be
5540 // undone. And salvaging all users outside of this block can unnecessarily
5541 // alter the lifetime of the live-value that the variable refers to.
5542 // Some of these things can be resolved by tolerating debug use-before-defs in
5543 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5544 // being used for more architectures.
5545
5546 return true;
5547}
5548
5550 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5551 BasicBlock *DestBlock,
5552 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5553 // For all debug values in the destination block, the sunk instruction
5554 // will still be available, so they do not need to be dropped.
5555
5556 // Fetch all DbgVariableRecords not already in the destination.
5557 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5558 for (auto &DVR : DbgVariableRecords)
5559 if (DVR->getParent() != DestBlock)
5560 DbgVariableRecordsToSalvage.push_back(DVR);
5561
5562 // Fetch a second collection, of DbgVariableRecords in the source block that
5563 // we're going to sink.
5564 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5565 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5566 if (DVR->getParent() == SrcBlock)
5567 DbgVariableRecordsToSink.push_back(DVR);
5568
5569 // Sort DbgVariableRecords according to their position in the block. This is a
5570 // partial order: DbgVariableRecords attached to different instructions will
5571 // be ordered by the instruction order, but DbgVariableRecords attached to the
5572 // same instruction won't have an order.
5573 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5574 return B->getInstruction()->comesBefore(A->getInstruction());
5575 };
5576 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5577
5578 // If there are two assignments to the same variable attached to the same
5579 // instruction, the ordering between the two assignments is important. Scan
5580 // for this (rare) case and establish which is the last assignment.
5581 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5583 if (DbgVariableRecordsToSink.size() > 1) {
5585 // Count how many assignments to each variable there is per instruction.
5586 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5587 DebugVariable DbgUserVariable =
5588 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5589 DVR->getDebugLoc()->getInlinedAt());
5590 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5591 }
5592
5593 // If there are any instructions with two assignments, add them to the
5594 // FilterOutMap to record that they need extra filtering.
5596 for (auto It : CountMap) {
5597 if (It.second > 1) {
5598 FilterOutMap[It.first] = nullptr;
5599 DupSet.insert(It.first.first);
5600 }
5601 }
5602
5603 // For all instruction/variable pairs needing extra filtering, find the
5604 // latest assignment.
5605 for (const Instruction *Inst : DupSet) {
5606 for (DbgVariableRecord &DVR :
5607 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5608 DebugVariable DbgUserVariable =
5609 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5610 DVR.getDebugLoc()->getInlinedAt());
5611 auto FilterIt =
5612 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5613 if (FilterIt == FilterOutMap.end())
5614 continue;
5615 if (FilterIt->second != nullptr)
5616 continue;
5617 FilterIt->second = &DVR;
5618 }
5619 }
5620 }
5621
5622 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5623 // out any duplicate assignments identified above.
5625 SmallSet<DebugVariable, 4> SunkVariables;
5626 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5628 continue;
5629
5630 DebugVariable DbgUserVariable =
5631 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5632 DVR->getDebugLoc()->getInlinedAt());
5633
5634 // For any variable where there were multiple assignments in the same place,
5635 // ignore all but the last assignment.
5636 if (!FilterOutMap.empty()) {
5637 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5638 auto It = FilterOutMap.find(IVP);
5639
5640 // Filter out.
5641 if (It != FilterOutMap.end() && It->second != DVR)
5642 continue;
5643 }
5644
5645 if (!SunkVariables.insert(DbgUserVariable).second)
5646 continue;
5647
5648 if (DVR->isDbgAssign())
5649 continue;
5650
5651 DVRClones.emplace_back(DVR->clone());
5652 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5653 }
5654
5655 // Perform salvaging without the clones, then sink the clones.
5656 if (DVRClones.empty())
5657 return;
5658
5659 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5660
5661 // The clones are in reverse order of original appearance. Assert that the
5662 // head bit is set on the iterator as we _should_ have received it via
5663 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5664 // we'll repeatedly insert at the head, such as:
5665 // DVR-3 (third insertion goes here)
5666 // DVR-2 (second insertion goes here)
5667 // DVR-1 (first insertion goes here)
5668 // Any-Prior-DVRs
5669 // InsertPtInst
5670 assert(InsertPos.getHeadBit());
5671 for (DbgVariableRecord *DVRClone : DVRClones) {
5672 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5673 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5674 }
5675}
5676
5678 while (!Worklist.isEmpty()) {
5679 // Walk deferred instructions in reverse order, and push them to the
5680 // worklist, which means they'll end up popped from the worklist in-order.
5681 while (Instruction *I = Worklist.popDeferred()) {
5682 // Check to see if we can DCE the instruction. We do this already here to
5683 // reduce the number of uses and thus allow other folds to trigger.
5684 // Note that eraseInstFromFunction() may push additional instructions on
5685 // the deferred worklist, so this will DCE whole instruction chains.
5688 ++NumDeadInst;
5689 continue;
5690 }
5691
5692 Worklist.push(I);
5693 }
5694
5695 Instruction *I = Worklist.removeOne();
5696 if (I == nullptr) continue; // skip null values.
5697
5698 // Check to see if we can DCE the instruction.
5701 ++NumDeadInst;
5702 continue;
5703 }
5704
5705 if (!DebugCounter::shouldExecute(VisitCounter))
5706 continue;
5707
5708 // See if we can trivially sink this instruction to its user if we can
5709 // prove that the successor is not executed more frequently than our block.
5710 // Return the UserBlock if successful.
5711 auto getOptionalSinkBlockForInst =
5712 [this](Instruction *I) -> std::optional<BasicBlock *> {
5713 if (!EnableCodeSinking)
5714 return std::nullopt;
5715
5716 BasicBlock *BB = I->getParent();
5717 BasicBlock *UserParent = nullptr;
5718 unsigned NumUsers = 0;
5719
5720 for (Use &U : I->uses()) {
5721 User *User = U.getUser();
5722 if (User->isDroppable()) {
5723 // Do not sink if there are dereferenceable assumes that would be
5724 // removed.
5726 if (II->getIntrinsicID() != Intrinsic::assume ||
5727 !II->getOperandBundle("dereferenceable"))
5728 continue;
5729 }
5730
5731 if (NumUsers > MaxSinkNumUsers)
5732 return std::nullopt;
5733
5734 Instruction *UserInst = cast<Instruction>(User);
5735 // Special handling for Phi nodes - get the block the use occurs in.
5736 BasicBlock *UserBB = UserInst->getParent();
5737 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5738 UserBB = PN->getIncomingBlock(U);
5739 // Bail out if we have uses in different blocks. We don't do any
5740 // sophisticated analysis (i.e. finding NearestCommonDominator of these
5741 // use blocks).
5742 if (UserParent && UserParent != UserBB)
5743 return std::nullopt;
5744 UserParent = UserBB;
5745
5746 // Make sure these checks are done only once; naturally, we do them the
5747 // first time we get the user parent, which saves compile time.
5748 if (NumUsers == 0) {
5749 // Try sinking to another block. If that block is unreachable, then do
5750 // not bother. SimplifyCFG should handle it.
5751 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5752 return std::nullopt;
5753
5754 auto *Term = UserParent->getTerminator();
5755 // See if the user is one of our successors that has only one
5756 // predecessor, so that we don't have to split the critical edge.
5757 // Another option where we can sink is a block that ends with a
5758 // terminator that does not pass control to other block (such as
5759 // return or unreachable or resume). In this case:
5760 // - I dominates the User (by SSA form);
5761 // - the User will be executed at most once.
5762 // So sinking I down to User is always profitable or neutral.
5763 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5764 return std::nullopt;
5765
5766 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5767 }
5768
5769 NumUsers++;
5770 }
5771
5772 // No user or only has droppable users.
5773 if (!UserParent)
5774 return std::nullopt;
5775
5776 return UserParent;
5777 };
5778
5779 auto OptBB = getOptionalSinkBlockForInst(I);
5780 if (OptBB) {
5781 auto *UserParent = *OptBB;
5782 // Okay, the CFG is simple enough, try to sink this instruction.
5783 if (tryToSinkInstruction(I, UserParent)) {
5784 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5785 MadeIRChange = true;
5786 // We'll add uses of the sunk instruction below, but since
5787 // sinking can expose opportunities for its *operands*, add
5788 // them to the worklist.
5789 for (Use &U : I->operands())
5790 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5791 Worklist.push(OpI);
5792 }
5793 }
5794
5795 // Now that we have an instruction, try combining it to simplify it.
5796 Builder.SetInsertPoint(I);
5797 Builder.CollectMetadataToCopy(
5798 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5799
5800#ifndef NDEBUG
5801 std::string OrigI;
5802#endif
5803 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5804 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5805
5806 if (Instruction *Result = visit(*I)) {
5807 ++NumCombined;
5808 // Should we replace the old instruction with a new one?
5809 if (Result != I) {
5810 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5811 << " New = " << *Result << '\n');
5812
5813 // We copy the old instruction's DebugLoc to the new instruction, unless
5814 // InstCombine already assigned a DebugLoc to it, in which case we
5815 // should trust the more specifically selected DebugLoc.
5816 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5817 // We also copy annotation metadata to the new instruction.
5818 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5819 // Everything uses the new instruction now.
5820 I->replaceAllUsesWith(Result);
5821
5822 // Move the name to the new instruction first.
5823 Result->takeName(I);
5824
5825 // Insert the new instruction into the basic block...
5826 BasicBlock *InstParent = I->getParent();
5827 BasicBlock::iterator InsertPos = I->getIterator();
5828
5829 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5830 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5831 // We need to fix up the insertion point.
5832 if (isa<PHINode>(I)) // PHI -> Non-PHI
5833 InsertPos = InstParent->getFirstInsertionPt();
5834 else // Non-PHI -> PHI
5835 InsertPos = InstParent->getFirstNonPHIIt();
5836 }
5837
5838 Result->insertInto(InstParent, InsertPos);
5839
5840 // Push the new instruction and any users onto the worklist.
5841 Worklist.pushUsersToWorkList(*Result);
5842 Worklist.push(Result);
5843
5845 } else {
5846 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5847 << " New = " << *I << '\n');
5848
5849 // If the instruction was modified, it's possible that it is now dead.
5850 // If so, remove it.
5853 } else {
5854 Worklist.pushUsersToWorkList(*I);
5855 Worklist.push(I);
5856 }
5857 }
5858 MadeIRChange = true;
5859 }
5860 }
5861
5862 Worklist.zap();
5863 return MadeIRChange;
5864}
5865
5866// Track the scopes used by !alias.scope and !noalias. In a function, a
5867// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5868// by both sets. If not, the declaration of the scope can be safely omitted.
5869// The MDNode of the scope can be omitted as well for the instructions that are
5870// part of this function. We do not do that at this point, as this might become
5871// too time consuming to do.
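// Illustrative sketch (not part of the original source): a declaration like
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// is only useful if scope !2 (or a scope list containing it) appears in both
// some !alias.scope and some !noalias metadata within the function; otherwise
// the tracker below reports it as dead and it is DCE'd with the rest.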
5873 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5874 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5875
5876public:
5878 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5879 if (!I->hasMetadataOtherThanDebugLoc())
5880 return;
5881
5882 auto Track = [](Metadata *ScopeList, auto &Container) {
5883 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5884 if (!MDScopeList || !Container.insert(MDScopeList).second)
5885 return;
5886 for (const auto &MDOperand : MDScopeList->operands())
5887 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5888 Container.insert(MDScope);
5889 };
5890
5891 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5892 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5893 }
5894
5897 if (!Decl)
5898 return false;
5899
5900 assert(Decl->use_empty() &&
5901 "llvm.experimental.noalias.scope.decl in use ?");
5902 const MDNode *MDSL = Decl->getScopeList();
5903 assert(MDSL->getNumOperands() == 1 &&
5904 "llvm.experimental.noalias.scope should refer to a single scope");
5905 auto &MDOperand = MDSL->getOperand(0);
5906 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5907 return !UsedAliasScopesAndLists.contains(MD) ||
5908 !UsedNoAliasScopesAndLists.contains(MD);
5909
5910 // Not an MDNode? Throw it away.
5911 return true;
5912 }
5913};
5914
5915/// Populate the IC worklist from a function, by walking it in reverse
5916/// post-order and adding all reachable code to the worklist.
5917///
5918/// This has a couple of tricks to make the code faster and more powerful. In
5919/// particular, we constant fold and DCE instructions as we go, to avoid adding
5920/// them to the worklist (this significantly speeds up instcombine on code where
5921/// many instructions are dead or constant). Additionally, if we find a branch
5922/// whose condition is a known constant, we only visit the reachable successors.
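// Illustrative sketch (not part of the original source): for a terminator like
//   br i1 true, label %live, label %dead
// only %live is walked; edges into %dead are recorded in DeadEdges, and phi
// inputs coming from this block along dead edges are rewritten to poison by
// HandleOnlyLiveSuccessor below.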
5924 bool MadeIRChange = false;
5926 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5927 DenseMap<Constant *, Constant *> FoldedConstants;
5928 AliasScopeTracker SeenAliasScopes;
5929
5930 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5931 for (BasicBlock *Succ : successors(BB))
5932 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5933 for (PHINode &PN : Succ->phis())
5934 for (Use &U : PN.incoming_values())
5935 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5936 U.set(PoisonValue::get(PN.getType()));
5937 MadeIRChange = true;
5938 }
5939 };
5940
5941 for (BasicBlock *BB : RPOT) {
5942 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5943 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5944 })) {
5945 HandleOnlyLiveSuccessor(BB, nullptr);
5946 continue;
5947 }
5948 LiveBlocks.insert(BB);
5949
5950 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5951 // ConstantProp instruction if trivially constant.
5952 if (!Inst.use_empty() &&
5953 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5954 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5955 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5956 << '\n');
5957 Inst.replaceAllUsesWith(C);
5958 ++NumConstProp;
5959 if (isInstructionTriviallyDead(&Inst, &TLI))
5960 Inst.eraseFromParent();
5961 MadeIRChange = true;
5962 continue;
5963 }
5964
5965 // See if we can constant fold its operands.
5966 for (Use &U : Inst.operands()) {
5968 continue;
5969
5970 auto *C = cast<Constant>(U);
5971 Constant *&FoldRes = FoldedConstants[C];
5972 if (!FoldRes)
5973 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5974
5975 if (FoldRes != C) {
5976 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5977 << "\n Old = " << *C
5978 << "\n New = " << *FoldRes << '\n');
5979 U = FoldRes;
5980 MadeIRChange = true;
5981 }
5982 }
5983
5984 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5985 // these call instructions consumes non-trivial amount of time and
5986 // provides no value for the optimization.
5987 if (!Inst.isDebugOrPseudoInst()) {
5988 InstrsForInstructionWorklist.push_back(&Inst);
5989 SeenAliasScopes.analyse(&Inst);
5990 }
5991 }
5992
5993 // If this is a branch or switch on a constant, mark only the single
5994 // live successor. Otherwise assume all successors are live.
5995 Instruction *TI = BB->getTerminator();
5996 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5997 if (isa<UndefValue>(BI->getCondition())) {
5998 // Branch on undef is UB.
5999 HandleOnlyLiveSuccessor(BB, nullptr);
6000 continue;
6001 }
6002 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
6003 bool CondVal = Cond->getZExtValue();
6004 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6005 continue;
6006 }
6007 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
6008 if (isa<UndefValue>(SI->getCondition())) {
6009 // Switch on undef is UB.
6010 HandleOnlyLiveSuccessor(BB, nullptr);
6011 continue;
6012 }
6013 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
6014 HandleOnlyLiveSuccessor(BB,
6015 SI->findCaseValue(Cond)->getCaseSuccessor());
6016 continue;
6017 }
6018 }
6019 }
6020
6021 // Remove instructions inside unreachable blocks. This prevents the
6022 // instcombine code from having to deal with some bad special cases, and
6023 // reduces use counts of instructions.
6024 for (BasicBlock &BB : F) {
6025 if (LiveBlocks.count(&BB))
6026 continue;
6027
6028 unsigned NumDeadInstInBB;
6029 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
6030
6031 MadeIRChange |= NumDeadInstInBB != 0;
6032 NumDeadInst += NumDeadInstInBB;
6033 }
6034
6035 // Once we've found all of the instructions to add to instcombine's worklist,
6036 // add them in reverse order. This way instcombine will visit from the top
6037 // of the function down. This jives well with the way that it adds all uses
6038 // of instructions to the worklist after doing a transformation, thus avoiding
6039 // some N^2 behavior in pathological cases.
6040 Worklist.reserve(InstrsForInstructionWorklist.size());
6041 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
6042 // DCE instruction if trivially dead. As we iterate in reverse program
6043 // order here, we will clean up whole chains of dead instructions.
6044 if (isInstructionTriviallyDead(Inst, &TLI) ||
6045 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
6046 ++NumDeadInst;
6047 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
6048 salvageDebugInfo(*Inst);
6049 Inst->eraseFromParent();
6050 MadeIRChange = true;
6051 continue;
6052 }
6053
6054 Worklist.push(Inst);
6055 }
6056
6057 return MadeIRChange;
6058}
6059
6061 // Collect backedges.
6063 for (BasicBlock *BB : RPOT) {
6064 Visited.insert(BB);
6065 for (BasicBlock *Succ : successors(BB))
6066 if (Visited.contains(Succ))
6067 BackEdges.insert({BB, Succ});
6068 }
6069 ComputedBackEdges = true;
6070}
6071
6077 const InstCombineOptions &Opts) {
6078 auto &DL = F.getDataLayout();
6079 bool VerifyFixpoint = Opts.VerifyFixpoint &&
6080 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
6081
6082 /// Builder - This is an IRBuilder that automatically inserts new
6083 /// instructions into the worklist when they are created.
6085 F.getContext(), TargetFolder(DL),
6086 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
6087 Worklist.add(I);
6088 if (auto *Assume = dyn_cast<AssumeInst>(I))
6089 AC.registerAssumption(Assume);
6090 }));
6091
6093
6094 // Lower dbg.declare intrinsics otherwise their value may be clobbered
6095 // by instcombiner.
6096 bool MadeIRChange = false;
6098 MadeIRChange = LowerDbgDeclare(F);
6099
6100 // Iterate while there is work to do.
6101 unsigned Iteration = 0;
6102 while (true) {
6103 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
6104 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
6105 << " on " << F.getName()
6106 << " reached; stopping without verifying fixpoint\n");
6107 break;
6108 }
6109
6110 ++Iteration;
6111 ++NumWorklistIterations;
6112 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6113 << F.getName() << "\n");
6114
6115 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
6116 BPI, PSI, DL, RPOT);
6118 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6119 MadeChangeInThisIteration |= IC.run();
6120 if (!MadeChangeInThisIteration)
6121 break;
6122
6123 MadeIRChange = true;
6124 if (Iteration > Opts.MaxIterations) {
6126 "Instruction Combining on " + Twine(F.getName()) +
6127 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6128 " iterations. " +
6129 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6130 "'instcombine-no-verify-fixpoint' to suppress this error.");
6131 }
6132 }
6133
6134 if (Iteration == 1)
6135 ++NumOneIteration;
6136 else if (Iteration == 2)
6137 ++NumTwoIterations;
6138 else if (Iteration == 3)
6139 ++NumThreeIterations;
6140 else
6141 ++NumFourOrMoreIterations;
6142
6143 return MadeIRChange;
6144}
6145
6147
6149 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6150 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6151 OS, MapClassName2PassName);
6152 OS << '<';
6153 OS << "max-iterations=" << Options.MaxIterations << ";";
6154 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6155 OS << '>';
6156}
6157
6158char InstCombinePass::ID = 0;
6159
6162 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6163 // No changes since last InstCombine pass, exit early.
6164 if (LRT.shouldSkip(&ID))
6165 return PreservedAnalyses::all();
6166
6167 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6168 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6169 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6171 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6172
6173 auto *AA = &AM.getResult<AAManager>(F);
6174 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6175 ProfileSummaryInfo *PSI =
6176 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6177 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6178 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6180
6181 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6182 BFI, BPI, PSI, Options)) {
6183 // No changes, all analyses are preserved.
6184 LRT.update(&ID, /*Changed=*/false);
6185 return PreservedAnalyses::all();
6186 }
6187
6188 // Mark all the analyses that instcombine updates as preserved.
6190 LRT.update(&ID, /*Changed=*/true);
6193 return PA;
6194}
6195
6211
6213 if (skipFunction(F))
6214 return false;
6215
6216 // Required analyses.
6217 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6218 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6219 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6221 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6223
6224 // Optional analyses.
6225 ProfileSummaryInfo *PSI =
6227 BlockFrequencyInfo *BFI =
6228 (PSI && PSI->hasProfileSummary()) ?
6230 nullptr;
6231 BranchProbabilityInfo *BPI = nullptr;
6232 if (auto *WrapperPass =
6234 BPI = &WrapperPass->getBPI();
6235
6236 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6237 BFI, BPI, PSI, InstCombineOptions());
6238}
6239
6241
6245
6247 "Combine redundant instructions", false, false)
6258 "Combine redundant instructions", false, false)
6259
6260// Initialization Routines
6264
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1952
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:852
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff it does not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:219
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:539
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:493
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:491
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:438
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:781
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
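A sketch of what the identity constants look like for a few opcodes, assuming an i32 type and a live LLVMContext (the function name is illustrative):
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"

  using namespace llvm;

  void identityExamples(LLVMContext &Ctx) {
    Type *I32 = Type::getInt32Ty(Ctx);
    Constant *AddId = ConstantExpr::getBinOpIdentity(Instruction::Add, I32); // i32 0
    Constant *MulId = ConstantExpr::getBinOpIdentity(Instruction::Mul, I32); // i32 1
    Constant *AndId = ConstantExpr::getBinOpIdentity(Instruction::And, I32); // i32 -1 (all bits set)
    (void)AddId; (void)MulId; (void)AndId;
  }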
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
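A small illustrative query against such a region; the 8-bit width, the constant 1, and the function name are made up for the example:
  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/Operator.h"

  using namespace llvm;

  bool addOneNeverWrapsSigned(const APInt &X) {
    // Contains exactly the 8-bit values X for which "X + 1" does not wrap in
    // the signed sense, i.e. everything except the maximum signed value 127.
    ConstantRange R = ConstantRange::makeExactNoWrapRegion(
        Instruction::Add, APInt(8, 1), OverflowingBinaryOperator::NoSignedWrap);
    return R.contains(X);
  }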
Constant Vector Declarations.
Definition Constants.h:522
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:222
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:321
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:164
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:807
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2007
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2762
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
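In C-like terms the shapes being merged look roughly like this (a sketch of the before/after patterns, not the actual transformation code):
  void before(bool c, int *P, int v1, int v2) {
    if (c)
      *P = v1;        // store in one predecessor
    else
      *P = v2;        // store in the other predecessor
  }

  void after(bool c, int *P, int v1, int v2) {
    *P = c ? v1 : v2; // one store in the common successor, fed by a phi/select
  }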
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth=0)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
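A scalar C++ mock-up of the defining property, with made-up lane values and mask:
  #include <array>
  #include <cassert>

  int main() {
    std::array<int, 4> NewC = {10, 20, 30, 40};
    std::array<int, 4> ShMask = {2, 0, 3, 1};
    std::array<int, 4> C{};
    for (int i = 0; i < 4; ++i)
      C[i] = NewC[ShMask[i]];                          // shuffle(NewC, ShMask)
    assert((C == std::array<int, 4>{30, 10, 40, 20})); // reproduces the original C
    return 0;
  }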
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
Tracking metadata reference owned by Metadata.
Definition Metadata.h:900
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1478
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:46
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:183
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:61
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:294
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:293
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:106
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:254
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
const Use & getOperandUse(unsigned i) const
Definition User.h:220
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:119
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:708
bool use_empty() const
Definition Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:888
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
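A typical PatternMatch use, sketched with illustrative names:
  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/PatternMatch.h"
  #include "llvm/IR/Value.h"

  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Returns true if V is "add X, C" for a (possibly splatted) constant C,
  // binding X and C on success.
  static bool matchAddOfConstant(Value *V, Value *&X, const APInt *&C) {
    return match(V, m_Add(m_Value(X), m_APInt(C)));
  }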
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2106
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:257
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2544
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1731
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2198
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:632
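A common use is mutating a range while walking it; a sketch (the helper name is illustrative) that erases trivially dead instructions from a block:
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/Transforms/Utils/Local.h"

  using namespace llvm;

  static void dropTriviallyDead(BasicBlock &BB) {
    // The early-increment adaptor advances before the body runs, so erasing
    // the current instruction does not invalidate the loop.
    for (Instruction &I : make_early_inc_range(BB))
      if (isInstructionTriviallyDead(&I))
        I.eraseFromParent();
  }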
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1618
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2468
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1662
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2037
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2414
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
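A tiny sketch showing how these enumerators are usually consumed through isModSet / isRefSet (both listed elsewhere in this index); the two wrapper names are hypothetical.
#include "llvm/Support/ModRef.h"

using namespace llvm;

// ModRefInfo is a pair of bits; isModSet / isRefSet test them individually,
// e.g. mayWriteMemory(ModRefInfo::ModRef) is true, mayWriteMemory(ModRefInfo::Ref) is false.
static bool mayWriteMemory(ModRefInfo MRI) { return isModSet(MRI); }
static bool mayReadMemory(ModRefInfo MRI) { return isRefSet(MRI); }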
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but does not use information from the instruction's non-constant operands, so the answer stays valid if those operands are later replaced.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
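A hedged sketch of a typical query, e.g. before duplicating a use of V into a context where poison would become immediate UB; safeToUseAt is a hypothetical name.
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Conservatively true only when V provably has no undef bits and is never
// poison at the position of CtxI.
static bool safeToUseAt(const Value *V, const Instruction *CtxI) {
  return isGuaranteedNotToBeUndefOrPoison(V, /*AC=*/nullptr, CtxI);
}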
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
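A small sketch of checking whether a call frees a specific pointer; isFreeOf is a hypothetical helper.
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// True when CB is a call to a known deallocation function (free, operator
// delete, ...) and the operand it frees is exactly Ptr.
static bool isFreeOf(const CallBase *CB, const Value *Ptr,
                     const TargetLibraryInfo *TLI) {
  return getFreedOperand(CB, TLI) == Ptr;
}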
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2009
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
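A compact sketch contrasting the three cast helpers; both function names are hypothetical.
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// dyn_cast_or_null<> tolerates a null input as well as a failed cast.
static Value *getStoredValueOrNull(Instruction *MaybeNull) {
  if (auto *SI = dyn_cast_or_null<StoreInst>(MaybeNull))
    return SI->getValueOperand();
  return nullptr;
}

// isa<> answers a yes/no type question; cast<> asserts the answer is yes.
static bool storesAConstant(Instruction &I) {
  return isa<StoreInst>(I) &&
         isa<Constant>(cast<StoreInst>(I).getValueOperand());
}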
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
Definition Metadata.cpp:64
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2136
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
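A minimal sketch of asking whether one i1 condition forces another; impliedByDominatingCond is a hypothetical wrapper.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include <optional>

using namespace llvm;

// If LHS is known true, is RHS forced to a fixed value? true/false give that
// value; std::nullopt means the implication cannot be established.
static std::optional<bool> impliedByDominatingCond(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL) {
  return isImpliedCondition(LHS, RHS, DL, /*LHSIsTrue=*/true);
}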
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:254
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:251
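A short sketch combining these three accessors; knownBelow16 is a hypothetical helper, assuming an already-computed KnownBits.
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// If at least BitWidth - 4 leading bits are known zero, only the low four bits
// can be set, so the value is provably < 16.
static bool knownBelow16(const KnownBits &Known) {
  return Known.getBitWidth() > 4 &&
         Known.countMinLeadingZeros() >= Known.getBitWidth() - 4;
}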
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const