1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simpler
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
32//
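// For example, canonicalizations #1 and #6 together turn (illustrative IR,
// hand-written rather than taken from this file):
//    %Z = mul i32 4, %X
// into:
//    %Z = shl i32 %X, 2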
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
111#include "llvm/Transforms/Utils/InstructionWorklist.h"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool> EnableCodeSinking("instcombine-code-sinking",
136 cl::desc("Enable code sinking"),
137 cl::init(true));
138
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
143static cl::opt<unsigned>
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
148 "instcombine-max-allocsite-removable-users", cl::Hidden, cl::init(2048),
149 cl::desc("Maximum number of users to visit in alloc-site "
150 "removability analysis"));
151
152namespace llvm {
153extern cl::opt<bool> ProfcheckDisableMetadataFixes;
154} // end namespace llvm
155
156// FIXME: Remove this flag when it is no longer necessary to convert
157// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
158// increases variable availability at the cost of accuracy. Variables that
159// cannot be promoted by mem2reg or SROA will be described as living in memory
160// for their entire lifetime. However, passes like DSE and instcombine can
161// delete stores to the alloca, leading to misleading and inaccurate debug
162// information. This flag can be removed when those passes are fixed.
163static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
164 cl::Hidden, cl::init(true));
165
166std::optional<Instruction *>
167InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
168 // Handle target specific intrinsics
169 if (II.getCalledFunction()->isTargetIntrinsic()) {
170 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
171 }
172 return std::nullopt;
173}
174
175std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
176 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
177 bool &KnownBitsComputed) {
178 // Handle target specific intrinsics
179 if (II.getCalledFunction()->isTargetIntrinsic()) {
180 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
181 *this, II, DemandedMask, Known, KnownBitsComputed);
182 }
183 return std::nullopt;
184}
185
186std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
187 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
188 APInt &PoisonElts2, APInt &PoisonElts3,
189 std::function<void(Instruction *, unsigned, APInt, APInt &)>
190 SimplifyAndSetOp) {
191 // Handle target specific intrinsics
192 if (II.getCalledFunction()->isTargetIntrinsic()) {
193 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
194 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
195 SimplifyAndSetOp);
196 }
197 return std::nullopt;
198}
199
200bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
201 // Approved exception for TTI use: This queries a legality property of the
202 // target, not a profitability heuristic. Ideally this should be part of
203 // DataLayout instead.
204 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
205}
206
207Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
208 if (!RewriteGEP)
209 return llvm::emitGEPOffset(&Builder, DL, GEP);
210
211 IRBuilderBase::InsertPointGuard Guard(Builder);
212 auto *Inst = dyn_cast<Instruction>(GEP);
213 if (Inst)
214 Builder.SetInsertPoint(Inst);
215
216 Value *Offset = EmitGEPOffset(GEP);
217 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
218 if (Inst && !GEP->hasAllConstantIndices() &&
219 !GEP->getSourceElementType()->isIntegerTy(8)) {
220 replaceInstUsesWith(
221 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
222 Offset, "", GEP->getNoWrapFlags()));
223 eraseInstFromFunction(*Inst);
224 }
225 return Offset;
226}
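// Illustrative example of the rewrite above (hand-written IR, not from the
// LLVM tests): given
//    %p = getelementptr inbounds i32, ptr %base, i64 %i
// EmitGEPOffset materializes the offset and, with RewriteGEP set, replaces the
// GEP with its canonical i8 form so the offset arithmetic is not duplicated:
//    %off = mul nsw i64 %i, 4
//    %p = getelementptr inbounds i8, ptr %base, i64 %off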
227
228Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
229 GEPNoWrapFlags NW, Type *IdxTy,
230 bool RewriteGEPs) {
231 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
232 if (Sum)
233 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
234 NW.isInBounds());
235 else
236 return Offset;
237 };
238
239 Value *Sum = nullptr;
240 Value *OneUseSum = nullptr;
241 Value *OneUseBase = nullptr;
242 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
243 for (GEPOperator *GEP : reverse(GEPs)) {
244 Value *Offset;
245 {
246 // Expand the offset at the point of the previous GEP to enable rewriting.
247 // However, use the original insertion point for calculating Sum.
248 IRBuilderBase::InsertPointGuard Guard(Builder);
249 auto *Inst = dyn_cast<Instruction>(GEP);
250 if (RewriteGEPs && Inst)
251 Builder.SetInsertPoint(Inst);
252
253 Offset = EmitGEPOffset(GEP);
254 if (Offset->getType() != IdxTy)
255 Offset = Builder.CreateVectorSplat(
256 cast<VectorType>(IdxTy)->getElementCount(), Offset);
257 if (GEP->hasOneUse()) {
258 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
259 OneUseSum = Add(OneUseSum, Offset);
260 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
261 if (!OneUseBase)
262 OneUseBase = GEP->getPointerOperand();
263 continue;
264 }
265
266 if (OneUseSum)
267 Offset = Add(OneUseSum, Offset);
268
269 // Rewrite the GEP to reuse the computed offset. This also includes
270 // offsets from preceding one-use GEPs of matched type.
271 if (RewriteGEPs && Inst &&
272 Offset->getType()->isVectorTy() == GEP->getType()->isVectorTy() &&
273 !(GEP->getSourceElementType()->isIntegerTy(8) &&
274 GEP->getOperand(1) == Offset)) {
275 replaceInstUsesWith(
276 *Inst,
277 Builder.CreatePtrAdd(
278 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
279 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
280 eraseInstFromFunction(*Inst);
281 }
282 }
283
284 Sum = Add(Sum, Offset);
285 OneUseSum = OneUseBase = nullptr;
286 OneUseFlags = GEPNoWrapFlags::all();
287 }
288 if (OneUseSum)
289 Sum = Add(Sum, OneUseSum);
290 if (!Sum)
291 return Constant::getNullValue(IdxTy);
292 return Sum;
293}
294
295/// Legal integers and common types are considered desirable. This is used to
296/// avoid creating instructions with types that may not be supported well by
297/// the backend.
298/// NOTE: This treats i8, i16 and i32 specially because they are common
299/// types in frontend languages.
300bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
301 switch (BitWidth) {
302 case 8:
303 case 16:
304 case 32:
305 return true;
306 default:
307 return DL.isLegalInteger(BitWidth);
308 }
309}
310
311/// Return true if it is desirable to convert an integer computation from a
312/// given bit width to a new bit width.
313/// We don't want to convert from a legal or desirable type (like i8) to an
314/// illegal type or from a smaller to a larger illegal type. A width of '1'
315/// is always treated as a desirable type because i1 is a fundamental type in
316/// IR, and there are many specialized optimizations for i1 types.
317/// Common/desirable widths are equally treated as legal to convert to, in
318/// order to open up more combining opportunities.
319bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
320 unsigned ToWidth) const {
321 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
322 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
323
324 // Convert to desirable widths even if they are not legal types.
325 // Only shrink types, to prevent infinite loops.
326 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
327 return true;
328
329 // If this is a legal or desirable integer 'from' type, and the result would be
330 // an illegal type, don't do the transformation.
331 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
332 return false;
333
334 // Otherwise, if both are illegal, do not increase the size of the result. We
335 // do allow things like i160 -> i64, but not i64 -> i160.
336 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
337 return false;
338
339 return true;
340}
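// A sketch of the resulting behavior, assuming a typical 64-bit DataLayout in
// which i8/i16/i32/i64 are legal:
//   shouldChangeType(i33, i32)  -> true   (shrinking to a desirable type)
//   shouldChangeType(i32, i33)  -> false  (desirable -> illegal)
//   shouldChangeType(i160, i64) -> true   (illegal -> legal, shrinking)
//   shouldChangeType(i64, i160) -> false  (would grow to an illegal type)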
341
342/// Return true if it is desirable to convert a computation from 'From' to 'To'.
343/// We don't want to convert from a legal to an illegal type or from a smaller
344/// to a larger illegal type. i1 is always treated as a legal type because it is
345/// a fundamental type in IR, and there are many specialized optimizations for
346/// i1 types.
347bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
348 // TODO: This could be extended to allow vectors. Datalayout changes might be
349 // needed to properly support that.
350 if (!From->isIntegerTy() || !To->isIntegerTy())
351 return false;
352
353 unsigned FromWidth = From->getPrimitiveSizeInBits();
354 unsigned ToWidth = To->getPrimitiveSizeInBits();
355 return shouldChangeType(FromWidth, ToWidth);
356}
357
358// Return true if No Signed Wrap should be maintained for I.
359// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
360// where both B and C should be ConstantInts, results in a constant that does
361// not overflow. This function only handles the Add/Sub/Mul opcodes. For
362// all other opcodes, the function conservatively returns false.
363static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
364 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
365 if (!OBO || !OBO->hasNoSignedWrap())
366 return false;
367
368 const APInt *BVal, *CVal;
369 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
370 return false;
371
372 // We reason about Add/Sub/Mul Only.
373 bool Overflow = false;
374 switch (I.getOpcode()) {
375 case Instruction::Add:
376 (void)BVal->sadd_ov(*CVal, Overflow);
377 break;
378 case Instruction::Sub:
379 (void)BVal->ssub_ov(*CVal, Overflow);
380 break;
381 case Instruction::Mul:
382 (void)BVal->smul_ov(*CVal, Overflow);
383 break;
384 default:
385 // Conservatively return false for other opcodes.
386 return false;
387 }
388 return !Overflow;
389}
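// Illustrative example (hand-written IR): reassociating
//    %Y = add nsw i8 %X, 100
//    %Z = add nsw i8 %Y, 100
// must drop nsw because 100 + 100 overflows i8, whereas with constants 10 and
// 20 the folded constant 30 is fine and nsw can be kept on the result.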
390
391static bool hasNoUnsignedWrap(BinaryOperator &I) {
392 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
393 return OBO && OBO->hasNoUnsignedWrap();
394}
395
396static bool hasNoSignedWrap(BinaryOperator &I) {
397 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
398 return OBO && OBO->hasNoSignedWrap();
399}
400
401/// Conservatively clears subclassOptionalData after a reassociation or
402/// commutation. We preserve fast-math flags when applicable because they can
403/// legitimately be kept across these transforms.
404static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
405 auto *FPMO = dyn_cast<FPMathOperator>(&I);
406 if (!FPMO) {
407 I.clearSubclassOptionalData();
408 return;
409 }
410
411 FastMathFlags FMF = I.getFastMathFlags();
412 I.clearSubclassOptionalData();
413 I.setFastMathFlags(FMF);
414}
415
416/// Combine constant operands of associative operations either before or after a
417/// cast to eliminate one of the associative operations:
418/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
419/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
420static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
421 InstCombinerImpl &IC) {
422 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
423 if (!Cast || !Cast->hasOneUse())
424 return false;
425
426 // TODO: Enhance logic for other casts and remove this check.
427 auto CastOpcode = Cast->getOpcode();
428 if (CastOpcode != Instruction::ZExt)
429 return false;
430
431 // TODO: Enhance logic for other BinOps and remove this check.
432 if (!BinOp1->isBitwiseLogicOp())
433 return false;
434
435 auto AssocOpcode = BinOp1->getOpcode();
436 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
437 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
438 return false;
439
440 Constant *C1, *C2;
441 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
442 !match(BinOp2->getOperand(1), m_Constant(C2)))
443 return false;
444
445 // TODO: This assumes a zext cast.
446 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
447 // to the destination type might lose bits.
448
449 // Fold the constants together in the destination type:
450 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
451 const DataLayout &DL = IC.getDataLayout();
452 Type *DestTy = C1->getType();
453 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
454 if (!CastC2)
455 return false;
456 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
457 if (!FoldedC)
458 return false;
459
460 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
461 IC.replaceOperand(*BinOp1, 1, FoldedC);
462 BinOp2->dropPoisonGeneratingFlags();
463 Cast->dropPoisonGeneratingFlags();
464 return true;
465}
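// Illustrative example of the fold above (hand-written IR):
//    %i = and i8 %X, 12
//    %z = zext i8 %i to i32
//    %r = and i32 %z, 10
// becomes, after folding 12 & 10 in the destination type:
//    %z = zext i8 %X to i32
//    %r = and i32 %z, 8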
466
467// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
468// inttoptr ( ptrtoint (x) ) --> x
469Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
470 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
471 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
472 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
473 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
474 Type *CastTy = IntToPtr->getDestTy();
475 if (PtrToInt &&
476 CastTy->getPointerAddressSpace() ==
477 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
478 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
479 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
480 return PtrToInt->getOperand(0);
481 }
482 return nullptr;
483}
484
485/// This performs a few simplifications for operators that are associative or
486/// commutative:
487///
488/// Commutative operators:
489///
490/// 1. Order operands such that they are listed from right (least complex) to
491/// left (most complex). This puts constants before unary operators before
492/// binary operators.
493///
494/// Associative operators:
495///
496/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
497/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
498///
499/// Associative and commutative operators:
500///
501/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
502/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
503/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
504/// if C1 and C2 are constants.
505bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
506 Instruction::BinaryOps Opcode = I.getOpcode();
507 bool Changed = false;
508
509 do {
510 // Order operands such that they are listed from right (least complex) to
511 // left (most complex). This puts constants before unary operators before
512 // binary operators.
513 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
514 getComplexity(I.getOperand(1)))
515 Changed = !I.swapOperands();
516
517 if (I.isCommutative()) {
518 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
519 replaceOperand(I, 0, Pair->first);
520 replaceOperand(I, 1, Pair->second);
521 Changed = true;
522 }
523 }
524
525 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
526 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
527
528 if (I.isAssociative()) {
529 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
530 if (Op0 && Op0->getOpcode() == Opcode) {
531 Value *A = Op0->getOperand(0);
532 Value *B = Op0->getOperand(1);
533 Value *C = I.getOperand(1);
534
535 // Does "B op C" simplify?
536 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
537 // It simplifies to V. Form "A op V".
538 replaceOperand(I, 0, A);
539 replaceOperand(I, 1, V);
540 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
541 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
542
543 // Conservatively clear all optional flags since they may not be
544 // preserved by the reassociation. Reset nsw/nuw based on the above
545 // analysis.
546 ClearSubclassDataAfterReassociation(I);
547
548 // Note: this is only valid because SimplifyBinOp doesn't look at
549 // the operands to Op0.
550 if (IsNUW)
551 I.setHasNoUnsignedWrap(true);
552
553 if (IsNSW)
554 I.setHasNoSignedWrap(true);
555
556 Changed = true;
557 ++NumReassoc;
558 continue;
559 }
560 }
561
562 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
563 if (Op1 && Op1->getOpcode() == Opcode) {
564 Value *A = I.getOperand(0);
565 Value *B = Op1->getOperand(0);
566 Value *C = Op1->getOperand(1);
567
568 // Does "A op B" simplify?
569 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
570 // It simplifies to V. Form "V op C".
571 replaceOperand(I, 0, V);
572 replaceOperand(I, 1, C);
573 // Conservatively clear the optional flags, since they may not be
574 // preserved by the reassociation.
575 ClearSubclassDataAfterReassociation(I);
576 Changed = true;
577 ++NumReassoc;
578 continue;
579 }
580 }
581 }
582
583 if (I.isAssociative() && I.isCommutative()) {
584 if (simplifyAssocCastAssoc(&I, *this)) {
585 Changed = true;
586 ++NumReassoc;
587 continue;
588 }
589
590 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
591 if (Op0 && Op0->getOpcode() == Opcode) {
592 Value *A = Op0->getOperand(0);
593 Value *B = Op0->getOperand(1);
594 Value *C = I.getOperand(1);
595
596 // Does "C op A" simplify?
597 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
598 // It simplifies to V. Form "V op B".
599 replaceOperand(I, 0, V);
600 replaceOperand(I, 1, B);
601 // Conservatively clear the optional flags, since they may not be
602 // preserved by the reassociation.
603 ClearSubclassDataAfterReassociation(I);
604 Changed = true;
605 ++NumReassoc;
606 continue;
607 }
608 }
609
610 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
611 if (Op1 && Op1->getOpcode() == Opcode) {
612 Value *A = I.getOperand(0);
613 Value *B = Op1->getOperand(0);
614 Value *C = Op1->getOperand(1);
615
616 // Does "C op A" simplify?
617 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
618 // It simplifies to V. Form "B op V".
619 replaceOperand(I, 0, B);
620 replaceOperand(I, 1, V);
621 // Conservatively clear the optional flags, since they may not be
622 // preserved by the reassociation.
623 ClearSubclassDataAfterReassociation(I);
624 Changed = true;
625 ++NumReassoc;
626 continue;
627 }
628 }
629
630 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
631 // if C1 and C2 are constants.
632 Value *A, *B;
633 Constant *C1, *C2, *CRes;
634 if (Op0 && Op1 &&
635 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
636 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
637 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
638 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
639 bool IsNUW = hasNoUnsignedWrap(I) &&
640 hasNoUnsignedWrap(*Op0) &&
641 hasNoUnsignedWrap(*Op1);
642 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
643 BinaryOperator::CreateNUW(Opcode, A, B) :
644 BinaryOperator::Create(Opcode, A, B);
645
646 if (isa<FPMathOperator>(NewBO)) {
647 FastMathFlags Flags = I.getFastMathFlags() &
648 Op0->getFastMathFlags() &
649 Op1->getFastMathFlags();
650 NewBO->setFastMathFlags(Flags);
651 }
652 InsertNewInstWith(NewBO, I.getIterator());
653 NewBO->takeName(Op1);
654 replaceOperand(I, 0, NewBO);
655 replaceOperand(I, 1, CRes);
656 // Conservatively clear the optional flags, since they may not be
657 // preserved by the reassociation.
658 ClearSubclassDataAfterReassociation(I);
659 if (IsNUW)
660 I.setHasNoUnsignedWrap(true);
661
662 Changed = true;
663 continue;
664 }
665 }
666
667 // No further simplifications.
668 return Changed;
669 } while (true);
670}
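// Illustrative example of transform 2 above (hand-written IR): in
//    %s = xor i32 %A, %B
//    %r = xor i32 %s, %B
// "B op C" is %B xor %B, which simplifies to 0, so the pass rewrites %r to
// "xor i32 %A, 0", and a later visit folds that to plain %A.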
671
672/// Return whether "X LOp (Y ROp Z)" is always equal to
673/// "(X LOp Y) ROp (X LOp Z)".
674static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
675 Instruction::BinaryOps ROp) {
676 // X & (Y | Z) <--> (X & Y) | (X & Z)
677 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
678 if (LOp == Instruction::And)
679 return ROp == Instruction::Or || ROp == Instruction::Xor;
680
681 // X | (Y & Z) <--> (X | Y) & (X | Z)
682 if (LOp == Instruction::Or)
683 return ROp == Instruction::And;
684
685 // X * (Y + Z) <--> (X * Y) + (X * Z)
686 // X * (Y - Z) <--> (X * Y) - (X * Z)
687 if (LOp == Instruction::Mul)
688 return ROp == Instruction::Add || ROp == Instruction::Sub;
689
690 return false;
691}
692
693/// Return whether "(X LOp Y) ROp Z" is always equal to
694/// "(X ROp Z) LOp (Y ROp Z)".
695static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
696 Instruction::BinaryOps ROp) {
697 if (Instruction::isCommutative(ROp))
698 return leftDistributesOverRight(ROp, LOp);
699
700 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
701 return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
702
703 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
704 // but this requires knowing that the addition does not overflow and other
705 // such subtleties.
706}
707
708/// This function returns identity value for given opcode, which can be used to
709/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
710static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
711 if (isa<Constant>(V))
712 return nullptr;
713
714 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
715}
716
717/// This function predicates factorization using distributive laws. By default,
718/// it just returns the 'Op' inputs. But for special-cases like
719/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
720/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
721/// allow more factorization opportunities.
722static Instruction::BinaryOps
723getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
724 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
725 assert(Op && "Expected a binary operator");
726 LHS = Op->getOperand(0);
727 RHS = Op->getOperand(1);
728 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
729 Constant *C;
730 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
731 // X << C --> X * (1 << C)
732 RHS = ConstantFoldBinaryInstruction(
733 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
734 assert(RHS && "Constant folding of immediate constants failed");
735 return Instruction::Mul;
736 }
737 // TODO: We can add other conversions e.g. shr => div etc.
738 }
739 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
740 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
741 match(Op, m_LShr(m_NonNegative(), m_Value()))) {
742 // lshr nneg C, X --> ashr nneg C, X
743 return Instruction::AShr;
744 }
745 }
746 return Op->getOpcode();
747}
748
749/// This tries to simplify binary operations by factorizing out common terms
750/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
751static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
752 InstCombiner::BuilderTy &Builder,
753 Instruction::BinaryOps InnerOpcode, Value *A,
754 Value *B, Value *C, Value *D) {
755 assert(A && B && C && D && "All values must be provided");
756
757 Value *V = nullptr;
758 Value *RetVal = nullptr;
759 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
760 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
761
762 // Does "X op' Y" always equal "Y op' X"?
763 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
764
765 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
766 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
767 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
768 // commutative case, "(A op' B) op (C op' A)"?
769 if (A == C || (InnerCommutative && A == D)) {
770 if (A != C)
771 std::swap(C, D);
772 // Consider forming "A op' (B op D)".
773 // If "B op D" simplifies then it can be formed with no cost.
774 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
775
776 // If "B op D" doesn't simplify then only go on if one of the existing
777 // operations "A op' B" and "C op' D" will be zapped as no longer used.
778 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
779 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
780 if (V)
781 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
782 }
783 }
784
785 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
786 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
787 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
788 // commutative case, "(A op' B) op (B op' D)"?
789 if (B == D || (InnerCommutative && B == C)) {
790 if (B != D)
791 std::swap(C, D);
792 // Consider forming "(A op C) op' B".
793 // If "A op C" simplifies then it can be formed with no cost.
794 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
795
796 // If "A op C" doesn't simplify then only go on if one of the existing
797 // operations "A op' B" and "C op' D" will be zapped as no longer used.
798 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
799 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
800 if (V)
801 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
802 }
803 }
804
805 if (!RetVal)
806 return nullptr;
807
808 ++NumFactor;
809 RetVal->takeName(&I);
810
811 // Try to add no-overflow flags to the final value.
812 if (isa<BinaryOperator>(RetVal)) {
813 bool HasNSW = false;
814 bool HasNUW = false;
815 if (isa<OverflowingBinaryOperator>(&I)) {
816 HasNSW = I.hasNoSignedWrap();
817 HasNUW = I.hasNoUnsignedWrap();
818 }
819 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
820 HasNSW &= LOBO->hasNoSignedWrap();
821 HasNUW &= LOBO->hasNoUnsignedWrap();
822 }
823
824 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
825 HasNSW &= ROBO->hasNoSignedWrap();
826 HasNUW &= ROBO->hasNoUnsignedWrap();
827 }
828
829 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
830 // We can propagate 'nsw' if we know that
831 // %Y = mul nsw i16 %X, C
832 // %Z = add nsw i16 %Y, %X
833 // =>
834 // %Z = mul nsw i16 %X, C+1
835 //
836 // iff C+1 isn't INT_MIN
837 const APInt *CInt;
838 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
839 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
840
841 // nuw can be propagated with any constant or nuw value.
842 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
843 }
844 }
845 return RetVal;
846}
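// Illustrative example of the factorization above (hand-written IR):
//    %m1 = mul i32 %A, %B
//    %m2 = mul i32 %A, %C
//    %r = add i32 %m1, %m2
// becomes, when at least one of the muls has no other use:
//    %s = add i32 %B, %C
//    %r = mul i32 %A, %s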
847
848// If `I` has one Const operand and the other matches `(ctpop (not x))`,
849// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
850// This is only useful if the new subtract can fold, so we only handle the
851// following cases:
852// 1) (add/sub/disjoint_or C, (ctpop (not x))
853// -> (add/sub/disjoint_or C', (ctpop x))
854// 2) (cmp pred C, (ctpop (not x))
855// -> (cmp pred C', (ctpop x))
856Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {
857 unsigned Opc = I->getOpcode();
858 unsigned ConstIdx = 1;
859 switch (Opc) {
860 default:
861 return nullptr;
862 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
863 // We can fold the BitWidth(x) with add/sub/icmp as long the other operand
864 // is constant.
865 case Instruction::Sub:
866 ConstIdx = 0;
867 break;
868 case Instruction::ICmp:
869 // Signed predicates aren't correct in some edge cases like for i2 types; as
870 // well, since (ctpop x) is known to be in [0, BitWidth(x)], almost all signed
871 // comparisons against it are simplified to unsigned.
872 if (cast<ICmpInst>(I)->isSigned())
873 return nullptr;
874 break;
875 case Instruction::Or:
876 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
877 return nullptr;
878 [[fallthrough]];
879 case Instruction::Add:
880 break;
881 }
882
883 Value *Op;
884 // Find ctpop.
885 if (!match(I->getOperand(1 - ConstIdx), m_OneUse(m_Ctpop(m_Value(Op)))))
886 return nullptr;
887
888 Constant *C;
889 // Check other operand is ImmConstant.
890 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
891 return nullptr;
892
893 Type *Ty = Op->getType();
894 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
895 // Need extra check for icmp. Note if this check is true, it generally means
896 // the icmp will simplify to true/false.
897 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
898 Constant *Cmp =
899 ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, C, BitWidthC, DL);
900 if (!Cmp || !Cmp->isNullValue())
901 return nullptr;
902 }
903
904 // Check we can invert `(not x)` for free.
905 bool Consumes = false;
906 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
907 return nullptr;
908 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
909 assert(NotOp != nullptr &&
910 "Desync between isFreeToInvert and getFreelyInverted");
911
912 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
913
914 Value *R = nullptr;
915
916 // Do the transformation here to avoid potentially introducing an infinite
917 // loop.
918 switch (Opc) {
919 case Instruction::Sub:
920 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
921 break;
922 case Instruction::Or:
923 case Instruction::Add:
924 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
925 break;
926 case Instruction::ICmp:
927 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
928 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
929 break;
930 default:
931 llvm_unreachable("Unhandled Opcode");
932 }
933 assert(R != nullptr);
934 return replaceInstUsesWith(*I, R);
935}
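// Worked example for case 1 above (hand-written, i8 so BitWidth(x) == 8):
// since ctpop(~x) == 8 - ctpop(x),
//    %n = xor i8 %x, -1
//    %c = call i8 @llvm.ctpop.i8(i8 %n)
//    %r = sub i8 10, %c
// folds to
//    %c2 = call i8 @llvm.ctpop.i8(i8 %x)
//    %r = add i8 %c2, 2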
936
937// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
938// IFF
939// 1) the logic_shifts match
940// 2) either both binops are binops and one is `and` or
941// BinOp1 is `and`
942// (logic_shift (inv_logic_shift C1, C), C) == C1 or
943//
944// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
945//
946// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
947// IFF
948// 1) the logic_shifts match
949// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
950//
951// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
952//
953// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
954// IFF
955// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
956// 2) Binop2 is `not`
957//
958// -> (arithmetic_shift Binop1((not X), Y), Amt)
959
960Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {
961 const DataLayout &DL = I.getDataLayout();
962 auto IsValidBinOpc = [](unsigned Opc) {
963 switch (Opc) {
964 default:
965 return false;
966 case Instruction::And:
967 case Instruction::Or:
968 case Instruction::Xor:
969 case Instruction::Add:
970 // Skip Sub as we only match constant masks which will canonicalize to use
971 // add.
972 return true;
973 }
974 };
975
976 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
977 // constraints.
978 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
979 unsigned ShOpc) {
980 assert(ShOpc != Instruction::AShr);
981 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
982 ShOpc == Instruction::Shl;
983 };
984
985 auto GetInvShift = [](unsigned ShOpc) {
986 assert(ShOpc != Instruction::AShr);
987 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
988 };
989
990 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
991 unsigned ShOpc, Constant *CMask,
992 Constant *CShift) {
993 // If the BinOp1 is `and` we don't need to check the mask.
994 if (BinOpc1 == Instruction::And)
995 return true;
996
997 // For all other possible transfers we need complete distributable
998 // binop/shift (anything but `add` + `lshr`).
999 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
1000 return false;
1001
1002 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
1003 // vecs, otherwise the mask will be simplified and the following check will
1004 // handle it).
1005 if (BinOpc2 == Instruction::And)
1006 return true;
1007
1008 // Otherwise, need mask that meets the below requirement.
1009 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1010 Constant *MaskInvShift =
1011 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1012 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1013 CMask;
1014 };
1015
1016 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1017 Constant *CMask, *CShift;
1018 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1019 if (!match(I.getOperand(ShOpnum),
1020 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1021 return nullptr;
1022 if (!match(
1023 I.getOperand(1 - ShOpnum),
1024 m_OneUse(m_c_BinOp(
1025 m_CombineAnd(m_Shift(m_Value(X), m_Specific(Shift)),
1026 m_Value(ShiftedX)),
1027 m_Value(Mask)))))
1028 return nullptr;
1029 // Make sure we are matching instruction shifts and not ConstantExpr
1030 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1031 auto *IX = dyn_cast<Instruction>(ShiftedX);
1032 if (!IY || !IX)
1033 return nullptr;
1034
1035 // LHS and RHS need same shift opcode
1036 unsigned ShOpc = IY->getOpcode();
1037 if (ShOpc != IX->getOpcode())
1038 return nullptr;
1039
1040 // Make sure binop is real instruction and not ConstantExpr
1041 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1042 if (!BO2)
1043 return nullptr;
1044
1045 unsigned BinOpc = BO2->getOpcode();
1046 // Make sure we have valid binops.
1047 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1048 return nullptr;
1049
1050 if (ShOpc == Instruction::AShr) {
1051 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1052 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1053 Value *NotX = Builder.CreateNot(X);
1054 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1055 return BinaryOperator::Create(
1056 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1057 }
1058
1059 return nullptr;
1060 }
1061
1062 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1063 // distribute to drop the shift irrelevant of constants.
1064 if (BinOpc == I.getOpcode() &&
1065 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1066 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1067 Value *NewBinOp1 = Builder.CreateBinOp(
1068 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1069 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1070 }
1071
1072 // Otherwise we can only distribute by constant shifting the mask, so
1073 // ensure we have constants.
1074 if (!match(Shift, m_ImmConstant(CShift)))
1075 return nullptr;
1076 if (!match(Mask, m_ImmConstant(CMask)))
1077 return nullptr;
1078
1079 // Check if we can distribute the binops.
1080 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1081 return nullptr;
1082
1083 Constant *NewCMask =
1084 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1085 Value *NewBinOp2 = Builder.CreateBinOp(
1086 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1087 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1088 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1089 NewBinOp1, CShift);
1090 };
1091
1092 if (Instruction *R = MatchBinOp(0))
1093 return R;
1094 return MatchBinOp(1);
1095}
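// Illustrative example of the "BinOp1 == BinOp2" case above (hand-written):
//    %a = lshr i8 %X, 3
//    %t = or i8 %a, %Mask
//    %b = lshr i8 %Y, 3
//    %r = or i8 %t, %b
// distributes to drop one shift:
//    %xy = or i8 %X, %Y
//    %sh = lshr i8 %xy, 3
//    %r = or i8 %sh, %Mask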
1096
1097// (Binop (zext C), (select C, T, F))
1098// -> (select C, (binop 1, T), (binop 0, F))
1099//
1100// (Binop (sext C), (select C, T, F))
1101// -> (select C, (binop -1, T), (binop 0, F))
1102//
1103// Attempt to simplify binary operations into a select with folded args, when
1104// one operand of the binop is a select instruction and the other operand is a
1105// zext/sext extension, whose value is the select condition.
1108 // TODO: this simplification may be extended to any speculatable instruction,
1109 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1110 Instruction::BinaryOps Opc = I.getOpcode();
1111 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1112 Value *A, *CondVal, *TrueVal, *FalseVal;
1113 Value *CastOp;
1114 Constant *CastTrueVal, *CastFalseVal;
1115
1116 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1117 return match(CastOp, m_SelectLike(m_Value(A), m_Constant(CastTrueVal),
1118 m_Constant(CastFalseVal))) &&
1119 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1120 m_Value(FalseVal)));
1121 };
1122
1123 // Make sure one side of the binop is a select instruction, and the other is a
1124 // zero/sign extension operating on an i1.
1125 if (MatchSelectAndCast(LHS, RHS))
1126 CastOp = LHS;
1127 else if (MatchSelectAndCast(RHS, LHS))
1128 CastOp = RHS;
1129 else
1130 return nullptr;
1131
1132 SelectInst *SI = ProfcheckDisableMetadataFixes
1133 ? nullptr
1134 : cast<SelectInst>(CastOp == LHS ? RHS : LHS);
1135
1136 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1137 bool IsCastOpRHS = (CastOp == RHS);
1138 Value *CastVal = IsTrueArm ? CastFalseVal : CastTrueVal;
1139
1140 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, CastVal)
1141 : Builder.CreateBinOp(Opc, CastVal, V);
1142 };
1143
1144 // If the value used in the zext/sext is the select condition, or the negated
1145 // of the select condition, the binop can be simplified.
1146 if (CondVal == A) {
1147 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1148 return SelectInst::Create(CondVal, NewTrueVal,
1149 NewFoldedConst(true, FalseVal), "", nullptr, SI);
1150 }
1151 if (match(A, m_Not(m_Specific(CondVal)))) {
1152 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1153 return SelectInst::Create(CondVal, NewTrueVal,
1154 NewFoldedConst(false, FalseVal), "", nullptr, SI);
1155 }
1156
1157 return nullptr;
1158}
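// Illustrative example of the zext case above (hand-written IR):
//    %z = zext i1 %c to i32
//    %s = select i1 %c, i32 %t, i32 %f
//    %r = add i32 %z, %s
// becomes a single select on the folded arms (the false arm's add of 0 folds
// away):
//    %t1 = add i32 %t, 1
//    %r = select i1 %c, i32 %t1, i32 %f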
1159
1160Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {
1161 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1162 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1163 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1164 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1165 Value *A, *B, *C, *D;
1166 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1167
1168 if (Op0)
1169 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1170 if (Op1)
1171 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1172
1173 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1174 // a common term.
1175 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1176 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1177 return V;
1178
1179 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1180 // term.
1181 if (Op0)
1182 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1183 if (Value *V =
1184 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1185 return V;
1186
1187 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1188 // term.
1189 if (Op1)
1190 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1191 if (Value *V =
1192 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1193 return V;
1194
1195 return nullptr;
1196}
1197
1198/// This tries to simplify binary operations which some other binary operation
1199/// distributes over either by factorizing out common terms
1200/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
1201/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1202/// Returns the simplified value, or null if it didn't simplify.
1203Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {
1204 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1205 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
1206 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
1207 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1208
1209 // Factorization.
1210 if (Value *R = tryFactorizationFolds(I))
1211 return R;
1212
1213 // Expansion.
1214 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1215 // The instruction has the form "(A op' B) op C". See if expanding it out
1216 // to "(A op C) op' (B op C)" results in simplifications.
1217 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1218 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1219
1220 // Disable the use of undef because it's not safe to distribute undef.
1221 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1222 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1223 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1224
1225 // Do "A op C" and "B op C" both simplify?
1226 if (L && R) {
1227 // They do! Return "L op' R".
1228 ++NumExpand;
1229 C = Builder.CreateBinOp(InnerOpcode, L, R);
1230 C->takeName(&I);
1231 return C;
1232 }
1233
1234 // Does "A op C" simplify to the identity value for the inner opcode?
1235 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1236 // They do! Return "B op C".
1237 ++NumExpand;
1238 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1239 C->takeName(&I);
1240 return C;
1241 }
1242
1243 // Does "B op C" simplify to the identity value for the inner opcode?
1244 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1245 // They do! Return "A op C".
1246 ++NumExpand;
1247 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1248 C->takeName(&I);
1249 return C;
1250 }
1251 }
1252
1253 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1254 // The instruction has the form "A op (B op' C)". See if expanding it out
1255 // to "(A op B) op' (A op C)" results in simplifications.
1256 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1257 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1258
1259 // Disable the use of undef because it's not safe to distribute undef.
1260 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1261 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1262 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1263
1264 // Do "A op B" and "A op C" both simplify?
1265 if (L && R) {
1266 // They do! Return "L op' R".
1267 ++NumExpand;
1268 A = Builder.CreateBinOp(InnerOpcode, L, R);
1269 A->takeName(&I);
1270 return A;
1271 }
1272
1273 // Does "A op B" simplify to the identity value for the inner opcode?
1274 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1275 // They do! Return "A op C".
1276 ++NumExpand;
1277 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1278 A->takeName(&I);
1279 return A;
1280 }
1281
1282 // Does "A op C" simplify to the identity value for the inner opcode?
1283 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1284 // They do! Return "A op B".
1285 ++NumExpand;
1286 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1287 A->takeName(&I);
1288 return A;
1289 }
1290 }
1291
1292 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1293}
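// Illustrative example of the expansion path above (hand-written IR): for
//    %na = xor i32 %A, -1
//    %o = or i32 %B, %na
//    %r = and i32 %A, %o
// expanding to "(A & B) | (A & ~A)" simplifies the right half to 0, the
// identity of 'or', so the whole expression becomes:
//    %r = and i32 %A, %B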
1294
1295static std::optional<std::pair<Value *, Value *>>
1296matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {
1297 if (LHS->getParent() != RHS->getParent())
1298 return std::nullopt;
1299
1300 if (LHS->getNumIncomingValues() < 2)
1301 return std::nullopt;
1302
1303 if (!equal(LHS->blocks(), RHS->blocks()))
1304 return std::nullopt;
1305
1306 Value *L0 = LHS->getIncomingValue(0);
1307 Value *R0 = RHS->getIncomingValue(0);
1308
1309 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1310 Value *L1 = LHS->getIncomingValue(I);
1311 Value *R1 = RHS->getIncomingValue(I);
1312
1313 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1314 continue;
1315
1316 return std::nullopt;
1317 }
1318
1319 return std::optional(std::pair(L0, R0));
1320}
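// Illustrative example of a symmetric phi pair (hand-written IR):
//    %a = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]
//    %b = phi i32 [ %y, %bb0 ], [ %x, %bb1 ]
// On every edge {%a, %b} is {%x, %y} in some order, so a commutative user
// such as "add i32 %a, %b" can be rewritten as "add i32 %x, %y".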
1321
1322std::optional<std::pair<Value *, Value *>>
1323InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1324 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1325 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1326 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1327 return std::nullopt;
1328 switch (LHSInst->getOpcode()) {
1329 case Instruction::PHI:
1330 return matchSymmetricPhiNodesPair(cast<PHINode>(LHS), cast<PHINode>(RHS));
1331 case Instruction::Select: {
1332 Value *Cond = LHSInst->getOperand(0);
1333 Value *TrueVal = LHSInst->getOperand(1);
1334 Value *FalseVal = LHSInst->getOperand(2);
1335 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1336 FalseVal == RHSInst->getOperand(1))
1337 return std::pair(TrueVal, FalseVal);
1338 return std::nullopt;
1339 }
1340 case Instruction::Call: {
1341 // Match min(a, b) and max(a, b)
1342 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1343 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1344 if (LHSMinMax && RHSMinMax &&
1345 LHSMinMax->getPredicate() ==
1346 ICmpInst::getSwappedPredicate(RHSMinMax->getPredicate()) &&
1347 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1348 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1349 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1350 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1351 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1352 return std::nullopt;
1353 }
1354 default:
1355 return std::nullopt;
1356 }
1357}
1358
1359Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
1360 Value *LHS,
1361 Value *RHS) {
1362 Value *A, *B, *C, *D, *E, *F;
1363 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1364 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1365 if (!LHSIsSelect && !RHSIsSelect)
1366 return nullptr;
1367
1368 SelectInst *SI = ProfcheckDisableMetadataFixes
1369 ? nullptr
1370 : cast<SelectInst>(LHSIsSelect ? LHS : RHS);
1371
1372 FastMathFlags FMF;
1373 BuilderTy::FastMathFlagGuard Guard(Builder);
1374 if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
1375 FMF = FPOp->getFastMathFlags();
1376 Builder.setFastMathFlags(FMF);
1377 }
1378
1379 Instruction::BinaryOps Opcode = I.getOpcode();
1380 SimplifyQuery Q = SQ.getWithInstruction(&I);
1381
1382 Value *Cond, *True = nullptr, *False = nullptr;
1383
1384 // Special-case for add/negate combination. Replace the zero in the negation
1385 // with the trailing add operand:
1386 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1387 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1388 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1389 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1390 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1391 return nullptr;
1392 Value *N;
1393 if (True && match(FVal, m_Neg(m_Value(N)))) {
1394 Value *Sub = Builder.CreateSub(Z, N);
1395 return Builder.CreateSelect(Cond, True, Sub, I.getName(), SI);
1396 }
1397 if (False && match(TVal, m_Neg(m_Value(N)))) {
1398 Value *Sub = Builder.CreateSub(Z, N);
1399 return Builder.CreateSelect(Cond, Sub, False, I.getName(), SI);
1400 }
1401 return nullptr;
1402 };
1403
1404 if (LHSIsSelect && RHSIsSelect && A == D) {
1405 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1406 Cond = A;
1407 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1408 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1409
1410 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1411 if (False && !True)
1412 True = Builder.CreateBinOp(Opcode, B, E);
1413 else if (True && !False)
1414 False = Builder.CreateBinOp(Opcode, C, F);
1415 }
1416 } else if (LHSIsSelect && LHS->hasOneUse()) {
1417 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1418 Cond = A;
1419 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1420 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1421 if (Value *NewSel = foldAddNegate(B, C, RHS))
1422 return NewSel;
1423 } else if (RHSIsSelect && RHS->hasOneUse()) {
1424 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1425 Cond = D;
1426 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1427 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1428 if (Value *NewSel = foldAddNegate(E, F, LHS))
1429 return NewSel;
1430 }
1431
1432 if (!True || !False)
1433 return nullptr;
1434
1435 Value *NewSI = Builder.CreateSelect(Cond, True, False, I.getName(), SI);
1436 NewSI->takeName(&I);
1437 return NewSI;
1438}
1439
1440/// Freely adapt every user of V as-if V was changed to !V.
1441/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1443 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1444 for (User *U : make_early_inc_range(I->users())) {
1445 if (U == IgnoredUser)
1446 continue; // Don't consider this user.
1447 switch (cast<Instruction>(U)->getOpcode()) {
1448 case Instruction::Select: {
1449 auto *SI = cast<SelectInst>(U);
1450 SI->swapValues();
1451 SI->swapProfMetadata();
1452 break;
1453 }
1454 case Instruction::CondBr: {
1455 auto *BI = cast<BranchInst>(U);
1456 BI->swapSuccessors(); // swaps prof metadata too
1457 if (BPI)
1458 BPI->swapSuccEdgesProbabilities(BI->getParent());
1459 break;
1460 }
1461 case Instruction::Xor:
1462 replaceInstUsesWith(cast<Instruction>(*U), I);
1463 // Add to worklist for DCE.
1464 addToWorklist(cast<Instruction>(U));
1465 break;
1466 default:
1467 llvm_unreachable("Got unexpected user - out of sync with "
1468 "canFreelyInvertAllUsersOf() ?");
1469 }
1470 }
1471
1472 // Update pre-existing debug value uses.
1473 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1474 llvm::findDbgValues(I, DbgVariableRecords);
1475
1476 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1477 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1478 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1479 Idx != End; ++Idx)
1480 if (DbgVal->getVariableLocationOp(Idx) == I)
1481 DbgVal->setExpression(
1482 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1483 }
1484}
1485
1486/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1487/// constant zero (which is the 'negate' form).
1488Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1489 Value *NegV;
1490 if (match(V, m_Neg(m_Value(NegV))))
1491 return NegV;
1492
1493 // Constants can be considered to be negated values if they can be folded.
1494 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
1495 return ConstantExpr::getNeg(C);
1496
1497 if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
1498 if (C->getType()->getElementType()->isIntegerTy())
1499 return ConstantExpr::getNeg(C);
1500
1501 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
1502 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1503 Constant *Elt = CV->getAggregateElement(i);
1504 if (!Elt)
1505 return nullptr;
1506
1507 if (isa<UndefValue>(Elt))
1508 continue;
1509
1510 if (!isa<ConstantInt>(Elt))
1511 return nullptr;
1512 }
1513 return ConstantExpr::getNeg(CV);
1514 }
1515
1516 // Negate integer vector splats.
1517 if (auto *CV = dyn_cast<Constant>(V))
1518 if (CV->getType()->isVectorTy() &&
1519 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1520 return ConstantExpr::getNeg(CV);
1521
1522 return nullptr;
1523}
1524
1525// Try to fold:
1526// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1527// -> ({s|u}itofp (int_binop x, y))
1528// 2) (fp_binop ({s|u}itofp x), FpC)
1529// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1530//
1531// Assuming the sign of the cast for x/y is `OpsFromSigned`.
1532Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1533 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1534 Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {
1535
1536 Type *FPTy = BO.getType();
1537 Type *IntTy = IntOps[0]->getType();
1538
1539 unsigned IntSz = IntTy->getScalarSizeInBits();
1540 // This is the maximum number of in-use bits in the integer for which the
1541 // int -> fp casts are exact.
1542 unsigned MaxRepresentableBits =
1543 APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
1544
1545 // Preserve known number of leading bits. This can allow us to trivially pass
1546 // the nsw/nuw checks later on.
1547 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1548
1549 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1550 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1551 auto IsNonZero = [&](unsigned OpNo) -> bool {
1552 if (OpsKnown[OpNo].hasKnownBits() &&
1553 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1554 return true;
1555 return isKnownNonZero(IntOps[OpNo], SQ);
1556 };
1557
1558 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1559 // NB: This matches the impl in ValueTracking, we just try to use cached
1560 // knownbits here. If we ever start supporting WithCache for
1561 // `isKnownNonNegative`, change this to an explicit call.
1562 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1563 };
1564
1565 // Check if we know for certain that ({s|u}itofp op) is exact.
1566 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1567 // Can we treat this operand as the desired sign?
1568 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1569 !IsNonNeg(OpNo))
1570 return false;
1571
1572 // If fp precision >= bitwidth(op) then it's exact.
1573 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1574 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1575 // handled specially. We can't, however, increase the bound arbitrarily for
1576 // `sitofp` as for larger sizes, it won't sign extend.
1577 if (MaxRepresentableBits < IntSz) {
1578 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1579 // numSignBits(op).
1580 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1581 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1582 if (OpsFromSigned)
1583 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1584 // Finally for unsigned check that fp precision >= bitwidth(op) -
1585 // numLeadingZeros(op).
1586 else {
1587 NumUsedLeadingBits[OpNo] =
1588 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1589 }
1590 }
1591 // NB: We could also check if op is known to be a power of 2 or zero (which
1592 // will always be representable). It's unlikely, however, that if we are
1593 // unable to bound op in any way, we will be able to pass the overflow
1594 // checks later on.
1595
1596 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1597 return false;
1598 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1599 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1600 IsNonZero(OpNo);
1601 };
1602
1603 // If we have a constant rhs, see if we can losslessly convert it to an int.
1604 if (Op1FpC != nullptr) {
1605 // Signed + Mul requires the constant to be non-zero.
1606 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1607 !match(Op1FpC, m_NonZeroFP()))
1608 return nullptr;
1609
1610 Constant *Op1IntC = ConstantFoldCastOperand(
1611 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1612 IntTy, DL);
1613 if (Op1IntC == nullptr)
1614 return nullptr;
1615 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1616 : Instruction::UIToFP,
1617 Op1IntC, FPTy, DL) != Op1FpC)
1618 return nullptr;
1619
1620 // First try to keep sign of cast the same.
1621 IntOps[1] = Op1IntC;
1622 }
1623
1624 // Ensure lhs/rhs integer types match.
1625 if (IntTy != IntOps[1]->getType())
1626 return nullptr;
1627
1628 if (Op1FpC == nullptr) {
1629 if (!IsValidPromotion(1))
1630 return nullptr;
1631 }
1632 if (!IsValidPromotion(0))
1633 return nullptr;
1634
1635 // Finally, we check that the integer version of the binop will not overflow.
1636 Instruction::BinaryOps IntOpc;
1637
1638 bool NeedsOverflowCheck = true;
1639 // Try to conservatively rule out overflow based on the already done precision
1640 // checks.
1641 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1642 unsigned OverflowMaxCurBits =
1643 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1644 bool OutputSigned = OpsFromSigned;
1645 switch (BO.getOpcode()) {
1646 case Instruction::FAdd:
1647 IntOpc = Instruction::Add;
1648 OverflowMaxOutputBits += OverflowMaxCurBits;
1649 break;
1650 case Instruction::FSub:
1651 IntOpc = Instruction::Sub;
1652 OverflowMaxOutputBits += OverflowMaxCurBits;
1653 break;
1654 case Instruction::FMul:
1655 IntOpc = Instruction::Mul;
1656 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1657 break;
1658 default:
1659 llvm_unreachable("Unsupported binop");
1660 }
1661 // The precision check may have already ruled out overflow.
1662 if (OverflowMaxOutputBits < IntSz) {
1663 NeedsOverflowCheck = false;
1664 // We can bound the unsigned overflow from sub to an in-range signed value
1665 // (this is what allows us to avoid the overflow check for sub).
1666 if (IntOpc == Instruction::Sub)
1667 OutputSigned = true;
1668 }
1669
1670 // Precision check did not rule out overflow, so need to check.
1671 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1672 // `IntOps[...]` arguments to `KnownOps[...]`.
1673 if (NeedsOverflowCheck &&
1674 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1675 return nullptr;
1676
1677 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1678 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1679 IntBO->setHasNoSignedWrap(OutputSigned);
1680 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1681 }
1682 if (OutputSigned)
1683 return new SIToFPInst(IntBinOp, FPTy);
1684 return new UIToFPInst(IntBinOp, FPTy);
1685}
1686
1687// Try to fold:
1688// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1689// -> ({s|u}itofp (int_binop x, y))
1690// 2) (fp_binop ({s|u}itofp x), FpC)
1691// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1692Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1693 // Don't perform the fold on vectors, as the integer operation may be much
1694 // more expensive than the float operation in that case.
1695 if (BO.getType()->isVectorTy())
1696 return nullptr;
1697
1698 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1699 Constant *Op1FpC = nullptr;
1700 // Check for:
1701 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1702 // 2) (binop ({s|u}itofp x), FpC)
1703 if (!match(BO.getOperand(0), m_IToFP(m_Value(IntOps[0]))))
1704 return nullptr;
1705
1706 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1707 !match(BO.getOperand(1), m_IToFP(m_Value(IntOps[1]))))
1708 return nullptr;
1709
1710 // Cache KnownBits a bit to potentially save some analysis.
1711 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1712
1713 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1714 // different constraints depending on the sign of the cast.
1715 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1716 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1717 IntOps, Op1FpC, OpsKnown))
1718 return R;
1719 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1720 Op1FpC, OpsKnown);
1721}
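// A sketch of the whole fold on concrete IR (illustrative; it only applies
// when the precision and overflow checks above succeed, e.g. when known bits
// prove the narrow add cannot wrap):
//   %xf = uitofp i8 %x to float
//   %yf = uitofp i8 %y to float
//   %r  = fadd float %xf, %yf
// -->
//   %s  = add nuw i8 %x, %y
//   %r  = uitofp i8 %s to float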
1722
1723/// A binop with a constant operand and a sign-extended boolean operand may be
1724/// converted into a select of constants by applying the binary operation to
1725/// the constant with the two possible values of the extended boolean (0 or -1).
1726Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1727 // TODO: Handle non-commutative binop (constant is operand 0).
1728 // TODO: Handle zext.
1729 // TODO: Peek through 'not' of cast.
1730 Value *BO0 = BO.getOperand(0);
1731 Value *BO1 = BO.getOperand(1);
1732 Value *X;
1733 Constant *C;
1734 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1735 !X->getType()->isIntOrIntVectorTy(1))
1736 return nullptr;
1737
1738 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1741 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1742 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1743 return createSelectInstWithUnknownProfile(X, TVal, FVal);
1744}
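// For example (illustrative IR):
//   %e = sext i1 %b to i32
//   %r = add i32 %e, 5
// -->
//   %r = select i1 %b, i32 4, i32 5    ; add(-1, 5) and add(0, 5)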
1745
1746static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1747                                                 bool IsTrueArm) {
1748  SmallVector<Value *> Ops;
1749 for (Value *Op : I.operands()) {
1750 Value *V = nullptr;
1751 if (Op == SI) {
1752 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1753 } else if (match(SI->getCondition(),
1754                     m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1755                                              : ICmpInst::ICMP_NE,
1756                                    m_Specific(Op), m_Value(V))) &&
1757               isGuaranteedNotToBeUndefOrPoison(V)) {
1758      // Pass
1759 } else if (match(Op, m_ZExt(m_Specific(SI->getCondition())))) {
1760 V = IsTrueArm ? ConstantInt::get(Op->getType(), 1)
1761 : ConstantInt::getNullValue(Op->getType());
1762 } else {
1763 V = Op;
1764 }
1765 Ops.push_back(V);
1766 }
1767
1768 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1769}
1770
1771static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1772                                             Value *NewOp, InstCombiner &IC) {
1773 Instruction *Clone = I.clone();
1774 Clone->replaceUsesOfWith(SI, NewOp);
1775  Clone->dropUBImplyingAttrsAndMetadata();
1776  IC.InsertNewInstBefore(Clone, I.getIterator());
1777 return Clone;
1778}
1779
1780Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1781                                                bool FoldWithMultiUse,
1782 bool SimplifyBothArms) {
1783  // Don't modify shared select instructions unless FoldWithMultiUse is set.
1784 if (!SI->hasOneUser() && !FoldWithMultiUse)
1785 return nullptr;
1786
1787 Value *TV = SI->getTrueValue();
1788 Value *FV = SI->getFalseValue();
1789
1790 // Bool selects with constant operands can be folded to logical ops.
1791 if (SI->getType()->isIntOrIntVectorTy(1))
1792 return nullptr;
1793
1794 // Avoid breaking min/max reduction pattern,
1795 // which is necessary for vectorization later.
1796  if (isa<MinMaxIntrinsic>(&Op))
1797    for (Value *IntrinOp : Op.operands())
1798 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1799 for (Value *PhiOp : PN->operands())
1800 if (PhiOp == &Op)
1801 return nullptr;
1802
1803 // Test if a FCmpInst instruction is used exclusively by a select as
1804 // part of a minimum or maximum operation. If so, refrain from doing
1805 // any other folding. This helps out other analyses which understand
1806 // non-obfuscated minimum and maximum idioms. And in this case, at
1807 // least one of the comparison operands has at least one user besides
1808 // the compare (the select), which would often largely negate the
1809 // benefit of folding anyway.
1810 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1811 if (CI->hasOneUse()) {
1812 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1813 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1814 !CI->isCommutative())
1815 return nullptr;
1816 }
1817 }
1818
1819 // Make sure that one of the select arms folds successfully.
1820 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1821 Value *NewFV =
1822 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1823 if (!NewTV && !NewFV)
1824 return nullptr;
1825
1826 if (SimplifyBothArms && !(NewTV && NewFV))
1827 return nullptr;
1828
1829 // Create an instruction for the arm that did not fold.
1830 if (!NewTV)
1831 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1832 if (!NewFV)
1833 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1834 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1835}
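// For example, when both select arms fold to constants (illustrative IR):
//   %s = select i1 %c, i32 2, i32 6
//   %r = add i32 %s, 1
// -->
//   %r = select i1 %c, i32 3, i32 7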
1836
1837static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1838                                         Value *InValue, BasicBlock *InBB,
1839 const DataLayout &DL,
1840 const SimplifyQuery SQ) {
1841 // NB: It is a precondition of this transform that the operands be
1842 // phi translatable!
1843  SmallVector<Value *> Ops;
1844  for (Value *Op : I.operands()) {
1845 if (Op == PN)
1846 Ops.push_back(InValue);
1847 else
1848 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1849 }
1850
1851 // Don't consider the simplification successful if we get back a constant
1852 // expression. That's just an instruction in hiding.
1853 // Also reject the case where we simplify back to the phi node. We wouldn't
1854 // be able to remove it in that case.
1855  Value *NewVal = simplifyInstructionWithOperands(
1856      &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1857 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1858 return NewVal;
1859
1860 // Check if incoming PHI value can be replaced with constant
1861 // based on implied condition.
1862 CondBrInst *TerminatorBI = dyn_cast<CondBrInst>(InBB->getTerminator());
1863 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1864 if (TerminatorBI &&
1865 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1866 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1867 std::optional<bool> ImpliedCond = isImpliedCondition(
1868 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1869 DL, LHSIsTrue);
1870 if (ImpliedCond)
1871 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1872 }
1873
1874 return nullptr;
1875}
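// A sketch of the implied-condition case above (illustrative IR): if %inbb
// ends in
//   %cond = icmp ult i32 %a, %b
//   br i1 %cond, label %phibb, label %other
// and the phi's user is `icmp ult i32 %phi, %b` with %a incoming from %inbb,
// the branch condition implies the compare on that edge, so the incoming
// value simplifies to true.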
1876
1877/// In some cases it is beneficial to fold a select into a binary operator.
1878/// For example:
1879/// %1 = or %in, 4
1880/// %2 = select %cond, %1, %in
1881/// %3 = or %2, 1
1882/// =>
1883/// %1 = select i1 %cond, 5, 1
1884/// %2 = or %1, %in
1886 assert(Op.isAssociative() && "The operation must be associative!");
1887
1888 SelectInst *SI = dyn_cast<SelectInst>(Op.getOperand(0));
1889
1890 Constant *Const;
1891 if (!SI || !match(Op.getOperand(1), m_ImmConstant(Const)) ||
1892 !Op.hasOneUse() || !SI->hasOneUse())
1893 return nullptr;
1894
1895 Value *TV = SI->getTrueValue();
1896 Value *FV = SI->getFalseValue();
1897 Value *Input, *NewTV, *NewFV;
1898 Constant *Const2;
1899
1900 if (TV->hasOneUse() && match(TV, m_BinOp(Op.getOpcode(), m_Specific(FV),
1901 m_ImmConstant(Const2)))) {
1902 NewTV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1903 NewFV = Const;
1904 Input = FV;
1905 } else if (FV->hasOneUse() &&
1906 match(FV, m_BinOp(Op.getOpcode(), m_Specific(TV),
1907 m_ImmConstant(Const2)))) {
1908 NewTV = Const;
1909 NewFV = ConstantFoldBinaryInstruction(Op.getOpcode(), Const, Const2);
1910 Input = TV;
1911 } else
1912 return nullptr;
1913
1914 if (!NewTV || !NewFV)
1915 return nullptr;
1916
1917 Value *NewSI =
1918 Builder.CreateSelect(SI->getCondition(), NewTV, NewFV, "",
1919 ProfcheckDisableMetadataFixes ? nullptr : SI);
1920 return BinaryOperator::Create(Op.getOpcode(), NewSI, Input);
1921}
1922
1923Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1924                                             bool AllowMultipleUses) {
1925 unsigned NumPHIValues = PN->getNumIncomingValues();
1926 if (NumPHIValues == 0)
1927 return nullptr;
1928
1929 // We normally only transform phis with a single use. However, if a PHI has
1930 // multiple uses and they are all the same operation, we can fold *all* of the
1931 // uses into the PHI.
1932 bool OneUse = PN->hasOneUse();
1933 bool IdenticalUsers = false;
1934 if (!AllowMultipleUses && !OneUse) {
1935 // Walk the use list for the instruction, comparing them to I.
1936 for (User *U : PN->users()) {
1937      Instruction *UI = cast<Instruction>(U);
1938      if (UI != &I && !I.isIdenticalTo(UI))
1939 return nullptr;
1940 }
1941 // Otherwise, we can replace *all* users with the new PHI we form.
1942 IdenticalUsers = true;
1943 }
1944
1945 // Check that all operands are phi-translatable.
1946 for (Value *Op : I.operands()) {
1947 if (Op == PN)
1948 continue;
1949
1950 // Non-instructions never require phi-translation.
1951 auto *I = dyn_cast<Instruction>(Op);
1952 if (!I)
1953 continue;
1954
1955 // Phi-translate can handle phi nodes in the same block.
1956 if (isa<PHINode>(I))
1957 if (I->getParent() == PN->getParent())
1958 continue;
1959
1960 // Operand dominates the block, no phi-translation necessary.
1961 if (DT.dominates(I, PN->getParent()))
1962 continue;
1963
1964 // Not phi-translatable, bail out.
1965 return nullptr;
1966 }
1967
1968 // Check to see whether the instruction can be folded into each phi operand.
1969 // If there is one operand that does not fold, remember the BB it is in.
1970 SmallVector<Value *> NewPhiValues;
1971 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1972 bool SeenNonSimplifiedInVal = false;
1973 for (unsigned i = 0; i != NumPHIValues; ++i) {
1974 Value *InVal = PN->getIncomingValue(i);
1975 BasicBlock *InBB = PN->getIncomingBlock(i);
1976
1977 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1978 NewPhiValues.push_back(NewVal);
1979 continue;
1980 }
1981
1982 // Handle some cases that can't be fully simplified, but where we know that
1983 // the two instructions will fold into one.
1984 auto WillFold = [&]() {
1985 if (!InVal->hasUseList() || !InVal->hasOneUser())
1986 return false;
1987
1988 // icmp of ucmp/scmp with constant will fold to icmp.
1989 const APInt *Ignored;
1990 if (isa<CmpIntrinsic>(InVal) &&
1991 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1992 return true;
1993
1994 // icmp eq zext(bool), 0 will fold to !bool.
1995 if (isa<ZExtInst>(InVal) &&
1996 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1997 match(&I,
1998                m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_ZeroInt())))
1999        return true;
2000
2001 return false;
2002 };
2003
2004 if (WillFold()) {
2005 OpsToMoveUseToIncomingBB.push_back(i);
2006 NewPhiValues.push_back(nullptr);
2007 continue;
2008 }
2009
2010 if (!OneUse && !IdenticalUsers)
2011 return nullptr;
2012
2013 if (SeenNonSimplifiedInVal)
2014 return nullptr; // More than one non-simplified value.
2015 SeenNonSimplifiedInVal = true;
2016
2017 // If there is exactly one non-simplified value, we can insert a copy of the
2018 // operation in that block. However, if this is a critical edge, we would
2019 // be inserting the computation on some other paths (e.g. inside a loop).
2020 // Only do this if the pred block is unconditionally branching into the phi
2021 // block. Also, make sure that the pred block is not dead code.
2022    auto *BI = dyn_cast<UncondBrInst>(InBB->getTerminator());
2023    if (!BI || !DT.isReachableFromEntry(InBB))
2024 return nullptr;
2025
2026 NewPhiValues.push_back(nullptr);
2027 OpsToMoveUseToIncomingBB.push_back(i);
2028
2029 // Do not push the operation across a loop backedge. This could result in
2030 // an infinite combine loop, and is generally non-profitable (especially
2031 // if the operation was originally outside the loop).
2032 if (isBackEdge(InBB, PN->getParent()))
2033 return nullptr;
2034 }
2035
2036 // Clone the instruction that uses the phi node and move it into the incoming
2037 // BB because we know that the next iteration of InstCombine will simplify it.
2038  SmallDenseMap<BasicBlock *, Instruction *> Clones;
2039  for (auto OpIndex : OpsToMoveUseToIncomingBB) {
2040    Value *Op = PN->getIncomingValue(OpIndex);
2041    BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
2042
2043 Instruction *Clone = Clones.lookup(OpBB);
2044 if (!Clone) {
2045 Clone = I.clone();
2046 for (Use &U : Clone->operands()) {
2047 if (U == PN)
2048 U = Op;
2049 else
2050 U = U->DoPHITranslation(PN->getParent(), OpBB);
2051 }
2052 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
2053 Clones.insert({OpBB, Clone});
2054 // We may have speculated the instruction.
2055      Clone->dropUBImplyingAttrsAndMetadata();
2056    }
2057
2058 NewPhiValues[OpIndex] = Clone;
2059 }
2060
2061 // Okay, we can do the transformation: create the new PHI node.
2062 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2063 InsertNewInstBefore(NewPN, PN->getIterator());
2064 NewPN->takeName(PN);
2065 NewPN->setDebugLoc(PN->getDebugLoc());
2066
2067 for (unsigned i = 0; i != NumPHIValues; ++i)
2068 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2069
2070 if (IdenticalUsers) {
2071 // Collect and deduplicate users up-front to avoid iterator invalidation.
2072    SmallSetVector<Instruction *, 4> ToReplace;
2073    for (User *U : PN->users()) {
2074      auto *User = cast<Instruction>(U);
2075      if (User == &I)
2076 continue;
2077 ToReplace.insert(User);
2078 }
2079 for (Instruction *I : ToReplace) {
2080 replaceInstUsesWith(*I, NewPN);
2082 }
2083 OneUse = true;
2084 }
2085
2086 if (OneUse) {
2087 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2088 }
2089 return replaceInstUsesWith(I, NewPN);
2090}
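// For example (illustrative IR; %bb1 must branch unconditionally into the
// phi block):
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
// -->
//   %x.add = add i32 %x, 4             ; cloned into %bb1
//   %r = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]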
2091
2092Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2093  if (!BO.isAssociative())
2094 return nullptr;
2095
2096 // Find the interleaved binary ops.
2097 auto Opc = BO.getOpcode();
2098 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2099 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2100 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2101 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2102 !BO0->isAssociative() || !BO1->isAssociative() ||
2103 BO0->getParent() != BO1->getParent())
2104 return nullptr;
2105
2106 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2107 "Expected commutative instructions!");
2108
2109 // Find the matching phis, forming the recurrences.
2110 PHINode *PN0, *PN1;
2111 Value *Start0, *Step0, *Start1, *Step1;
2112 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2113 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2114 PN0->getParent() != PN1->getParent())
2115 return nullptr;
2116
2117 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2118 "Expected PHIs with two incoming values!");
2119
2120 // Convert the start and step values to constants.
2121 auto *Init0 = dyn_cast<Constant>(Start0);
2122 auto *Init1 = dyn_cast<Constant>(Start1);
2123 auto *C0 = dyn_cast<Constant>(Step0);
2124 auto *C1 = dyn_cast<Constant>(Step1);
2125 if (!Init0 || !Init1 || !C0 || !C1)
2126 return nullptr;
2127
2128 // Fold the recurrence constants.
2129 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2130 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2131 if (!Init || !C)
2132 return nullptr;
2133
2134 // Create the reduced PHI.
2135 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2136 "reduced.phi");
2137
2138 // Create the new binary op.
2139 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2140 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2141 // Intersect FMF flags for FADD and FMUL.
2142 FastMathFlags Intersect = BO0->getFastMathFlags() &
2143 BO1->getFastMathFlags() & BO.getFastMathFlags();
2144 NewBO->setFastMathFlags(Intersect);
2145 } else {
2146 OverflowTracking Flags;
2147 Flags.AllKnownNonNegative = false;
2148 Flags.AllKnownNonZero = false;
2149 Flags.mergeFlags(*BO0);
2150 Flags.mergeFlags(*BO1);
2151 Flags.mergeFlags(BO);
2152 Flags.applyFlags(*NewBO);
2153 }
2154 NewBO->takeName(&BO);
2155
2156 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2157 auto *V = PN0->getIncomingValue(I);
2158 auto *BB = PN0->getIncomingBlock(I);
2159 if (V == Init0) {
2160 assert(((PN1->getIncomingValue(0) == Init1 &&
2161 PN1->getIncomingBlock(0) == BB) ||
2162 (PN1->getIncomingValue(1) == Init1 &&
2163 PN1->getIncomingBlock(1) == BB)) &&
2164 "Invalid incoming block!");
2165 NewPN->addIncoming(Init, BB);
2166 } else if (V == BO0) {
2167 assert(((PN1->getIncomingValue(0) == BO1 &&
2168 PN1->getIncomingBlock(0) == BB) ||
2169 (PN1->getIncomingValue(1) == BO1 &&
2170 PN1->getIncomingBlock(1) == BB)) &&
2171 "Invalid incoming block!");
2172 NewPN->addIncoming(NewBO, BB);
2173 } else
2174 llvm_unreachable("Unexpected incoming value!");
2175 }
2176
2177 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2178 << "\n with " << *PN1 << "\n " << *BO1
2179 << '\n');
2180
2181 // Insert the new recurrence and remove the old (dead) ones.
2182 InsertNewInstWith(NewPN, PN0->getIterator());
2183 InsertNewInstWith(NewBO, BO0->getIterator());
2184
2191
2192 return replaceInstUsesWith(BO, NewBO);
2193}
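// For example (illustrative IR; each recurrence binop is used only by its
// phi and by %r):
//   loop:
//     %p0 = phi i32 [ 1, %entry ], [ %a0, %loop ]
//     %p1 = phi i32 [ 2, %entry ], [ %a1, %loop ]
//     %a0 = add i32 %p0, 3
//     %a1 = add i32 %p1, 4
//     %r  = add i32 %a0, %a1
// -->
//   loop:
//     %reduced.phi = phi i32 [ 3, %entry ], [ %r, %loop ]
//     %r = add i32 %reduced.phi, 7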
2194
2195Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2196  // Attempt to fold binary operators whose operands are simple recurrences.
2197 if (auto *NewBO = foldBinopWithRecurrence(BO))
2198 return NewBO;
2199
2200 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2201 // we are guarding against replicating the binop in >1 predecessor.
2202 // This could miss matching a phi with 2 constant incoming values.
2203 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2204 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2205 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2206 Phi0->getNumOperands() != Phi1->getNumOperands())
2207 return nullptr;
2208
2209 // TODO: Remove the restriction for binop being in the same block as the phis.
2210 if (BO.getParent() != Phi0->getParent() ||
2211 BO.getParent() != Phi1->getParent())
2212 return nullptr;
2213
2214  // Fold if one of phi0's or phi1's incoming values, per incoming block, is
2215  // the identity constant of the binary operator; the binop then reduces to a
2216  // phi of the other incoming values.
2217 // For example:
2218 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2219 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2220 // %add = add i32 %phi0, %phi1
2221 // ==>
2222 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2223  Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2224                                               /*AllowRHSConstant*/ false);
2225 if (C) {
2226 SmallVector<Value *, 4> NewIncomingValues;
2227 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2228 auto &Phi0Use = std::get<0>(T);
2229 auto &Phi1Use = std::get<1>(T);
2230 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2231 return false;
2232 Value *Phi0UseV = Phi0Use.get();
2233 Value *Phi1UseV = Phi1Use.get();
2234 if (Phi0UseV == C)
2235 NewIncomingValues.push_back(Phi1UseV);
2236 else if (Phi1UseV == C)
2237 NewIncomingValues.push_back(Phi0UseV);
2238 else
2239 return false;
2240 return true;
2241 };
2242
2243 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2244 CanFoldIncomingValuePair)) {
2245 PHINode *NewPhi =
2246 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2247 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2248 "The number of collected incoming values should equal the number "
2249 "of the original PHINode operands!");
2250 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2251 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2252 return NewPhi;
2253 }
2254 }
2255
2256 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2257 return nullptr;
2258
2259 // Match a pair of incoming constants for one of the predecessor blocks.
2260 BasicBlock *ConstBB, *OtherBB;
2261 Constant *C0, *C1;
2262 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2263 ConstBB = Phi0->getIncomingBlock(0);
2264 OtherBB = Phi0->getIncomingBlock(1);
2265 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2266 ConstBB = Phi0->getIncomingBlock(1);
2267 OtherBB = Phi0->getIncomingBlock(0);
2268 } else {
2269 return nullptr;
2270 }
2271 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2272 return nullptr;
2273
2274 // The block that we are hoisting to must reach here unconditionally.
2275 // Otherwise, we could be speculatively executing an expensive or
2276  // non-speculatable op.
2277 auto *PredBlockBranch = dyn_cast<UncondBrInst>(OtherBB->getTerminator());
2278 if (!PredBlockBranch || !DT.isReachableFromEntry(OtherBB))
2279 return nullptr;
2280
2281 // TODO: This check could be tightened to only apply to binops (div/rem) that
2282 // are not safe to speculatively execute. But that could allow hoisting
2283 // potentially expensive instructions (fdiv for example).
2284 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2285    if (BBIter->mayHaveSideEffects())
2286      return nullptr;
2287
2288 // Fold constants for the predecessor block with constant incoming values.
2289 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2290 if (!NewC)
2291 return nullptr;
2292
2293 // Make a new binop in the predecessor block with the non-constant incoming
2294 // values.
2295 Builder.SetInsertPoint(PredBlockBranch);
2296 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2297 Phi0->getIncomingValueForBlock(OtherBB),
2298 Phi1->getIncomingValueForBlock(OtherBB));
2299 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2300 NotFoldedNewBO->copyIRFlags(&BO);
2301
2302 // Replace the binop with a phi of the new values. The old phis are dead.
2303 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2304 NewPhi->addIncoming(NewBO, OtherBB);
2305 NewPhi->addIncoming(NewC, ConstBB);
2306 return NewPhi;
2307}
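// For example, the constant-pair case above (illustrative IR; %bb.o must end
// in an unconditional branch):
//   %phi0 = phi i32 [ 7, %bb.c ], [ %x, %bb.o ]
//   %phi1 = phi i32 [ 5, %bb.c ], [ %y, %bb.o ]
//   %r = add i32 %phi0, %phi1
// -->
//   %xy = add i32 %x, %y               ; created in %bb.o
//   %r = phi i32 [ 12, %bb.c ], [ %xy, %bb.o ]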
2308
2309Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2310  auto TryFoldOperand = [&](unsigned OpIdx,
2311 bool IsOtherParamConst) -> Instruction * {
2312 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(OpIdx)))
2313 return FoldOpIntoSelect(I, Sel, false, !IsOtherParamConst);
2314 if (auto *PN = dyn_cast<PHINode>(I.getOperand(OpIdx)))
2315 return foldOpIntoPhi(I, PN);
2316 return nullptr;
2317 };
2318
2319 if (Instruction *NewI =
2320 TryFoldOperand(/*OpIdx=*/0, isa<Constant>(I.getOperand(1))))
2321 return NewI;
2322 return TryFoldOperand(/*OpIdx=*/1, isa<Constant>(I.getOperand(0)));
2323}
2324
2325static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2326  // If this GEP has only 0 indices, it is the same pointer as
2327 // Src. If Src is not a trivial GEP too, don't combine
2328 // the indices.
2329 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2330 !Src.hasOneUse())
2331 return false;
2332 return true;
2333}
2334
2335/// Find a constant NewC that has property:
2336/// shuffle(NewC, ShMask) = C
2337/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2338///
2339/// A 1-to-1 mapping is not required. Example:
2340/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2341static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2342                                   VectorType *NewCTy) {
2343 if (isa<ScalableVectorType>(NewCTy)) {
2344 Constant *Splat = C->getSplatValue();
2345 if (!Splat)
2346 return nullptr;
2347    return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2348  }
2349
2350 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2351 cast<FixedVectorType>(C->getType())->getNumElements())
2352 return nullptr;
2353
2354 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2355 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2356 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2357 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2358 for (unsigned I = 0; I < NumElts; ++I) {
2359 Constant *CElt = C->getAggregateElement(I);
2360 if (ShMask[I] >= 0) {
2361 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2362 Constant *NewCElt = NewVecC[ShMask[I]];
2363 // Bail out if:
2364 // 1. The constant vector contains a constant expression.
2365 // 2. The shuffle needs an element of the constant vector that can't
2366 // be mapped to a new constant vector.
2367 // 3. This is a widening shuffle that copies elements of V1 into the
2368 // extended elements (extending with poison is allowed).
2369 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2370 I >= NewCNumElts)
2371 return nullptr;
2372 NewVecC[ShMask[I]] = CElt;
2373 }
2374 }
2375 return ConstantVector::get(NewVecC);
2376}
2377
2378// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
2379static Constant *constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector,
2380                                            Constant *Splat, bool SplatLHS,
2381 const DataLayout &DL) {
2382 ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
2383  Constant *LHS = ConstantVector::getSplat(EC, Splat);
2384  Constant *RHS = Vector;
2385 if (!SplatLHS)
2386 std::swap(LHS, RHS);
2387 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
2388}
2389
2390template <Intrinsic::ID SpliceID>
2392 InstCombiner::BuilderTy &Builder) {
2393 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2394 auto CreateBinOpSplice = [&](Value *X, Value *Y, Value *Offset) {
2395 Value *V = Builder.CreateBinOp(Inst.getOpcode(), X, Y, Inst.getName());
2396 if (auto *BO = dyn_cast<BinaryOperator>(V))
2397 BO->copyIRFlags(&Inst);
2398 Module *M = Inst.getModule();
2399 Function *F = Intrinsic::getOrInsertDeclaration(M, SpliceID, V->getType());
2400 return CallInst::Create(F, {V, PoisonValue::get(V->getType()), Offset});
2401 };
2402 Value *V1, *V2, *Offset;
2403 if (match(LHS,
2405 // Op(splice(V1, poison, offset), splice(V2, poison, offset))
2406 // -> splice(Op(V1, V2), poison, offset)
2408 m_Specific(Offset))) &&
2409 (LHS->hasOneUse() || RHS->hasOneUse() ||
2410 (LHS == RHS && LHS->hasNUses(2))))
2411 return CreateBinOpSplice(V1, V2, Offset);
2412
2413 // Op(splice(V1, poison, offset), RHSSplat)
2414 // -> splice(Op(V1, RHSSplat), poison, offset)
2415 if (LHS->hasOneUse() && isSplatValue(RHS))
2416 return CreateBinOpSplice(V1, RHS, Offset);
2417 }
2418 // Op(LHSSplat, splice(V2, poison, offset))
2419 // -> splice(Op(LHSSplat, V2), poison, offset)
2420 else if (isSplatValue(LHS) &&
2422 m_Value(Offset)))))
2423 return CreateBinOpSplice(LHS, V2, Offset);
2424
2425 // TODO: Fold binops of the form
2426 // Op(splice(poison, V1, offset), splice(poison, V2, offset))
2427 // -> splice(poison, Op(V1, V2), offset)
2428
2429 return nullptr;
2430}
2431
2433 if (!isa<VectorType>(Inst.getType()))
2434 return nullptr;
2435
2436 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2437 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2438 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2439 cast<VectorType>(Inst.getType())->getElementCount());
2440 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2441 cast<VectorType>(Inst.getType())->getElementCount());
2442
2443 auto foldConstantsThroughSubVectorInsertSplat =
2444 [&](Value *MaybeSubVector, Value *MaybeSplat,
2445 bool SplatLHS) -> Instruction * {
2446 Value *Idx;
2447 Constant *Splat, *SubVector, *Dest;
2448 if (!match(MaybeSplat, m_ConstantSplat(m_Constant(Splat))) ||
2449 !match(MaybeSubVector,
2450 m_VectorInsert(m_Constant(Dest), m_Constant(SubVector),
2451 m_Value(Idx))))
2452 return nullptr;
2453 SubVector =
2454 constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
2455 Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
2456 if (!SubVector || !Dest)
2457 return nullptr;
2458 auto *InsertVector =
2459 Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, Idx);
2460 return replaceInstUsesWith(Inst, InsertVector);
2461 };
2462
2463 // If one operand is a constant splat and the other operand is a
2464 // `vector.insert` where both the destination and subvector are constant,
2465 // apply the operation to both the destination and subvector, returning a new
2466 // constant `vector.insert`. This helps constant folding for scalable vectors.
2467 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2468 /*MaybeSubVector=*/LHS, /*MaybeSplat=*/RHS, /*SplatLHS=*/false))
2469 return Folded;
2470 if (Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2471 /*MaybeSubVector=*/RHS, /*MaybeSplat=*/LHS, /*SplatLHS=*/true))
2472 return Folded;
2473
2474 // If both operands of the binop are vector concatenations, then perform the
2475 // narrow binop on each pair of the source operands followed by concatenation
2476 // of the results.
2477 Value *L0, *L1, *R0, *R1;
2478 ArrayRef<int> Mask;
2479 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2480 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2481 LHS->hasOneUse() && RHS->hasOneUse() &&
2482 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2483 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2484 // This transform does not have the speculative execution constraint as
2485 // below because the shuffle is a concatenation. The new binops are
2486 // operating on exactly the same elements as the existing binop.
2487 // TODO: We could ease the mask requirement to allow different undef lanes,
2488 // but that requires an analysis of the binop-with-undef output value.
2489 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2490 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2491 BO->copyIRFlags(&Inst);
2492 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2493 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2494 BO->copyIRFlags(&Inst);
2495 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2496 }
2497
2498 auto createBinOpReverse = [&](Value *X, Value *Y) {
2499 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2500 if (auto *BO = dyn_cast<BinaryOperator>(V))
2501 BO->copyIRFlags(&Inst);
2502 Module *M = Inst.getModule();
2503    Function *F = Intrinsic::getOrInsertDeclaration(
2504        M, Intrinsic::vector_reverse, V->getType());
2505 return CallInst::Create(F, V);
2506 };
2507
2508 // NOTE: Reverse shuffles don't require the speculative execution protection
2509 // below because they don't affect which lanes take part in the computation.
2510
2511 Value *V1, *V2;
2512 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2513 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2514 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2515 (LHS->hasOneUse() || RHS->hasOneUse() ||
2516 (LHS == RHS && LHS->hasNUses(2))))
2517 return createBinOpReverse(V1, V2);
2518
2519 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2520 if (LHS->hasOneUse() && isSplatValue(RHS))
2521 return createBinOpReverse(V1, RHS);
2522 }
2523 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2524 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2525 return createBinOpReverse(LHS, V2);
2526
2527 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2528 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2529 if (auto *BO = dyn_cast<BinaryOperator>(V))
2530 BO->copyIRFlags(&Inst);
2531
2532 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2533 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2534 Module *M = Inst.getModule();
2535    Function *F = Intrinsic::getOrInsertDeclaration(
2536        M, Intrinsic::experimental_vp_reverse, V->getType());
2537 return CallInst::Create(F, {V, AllTrueMask, EVL});
2538 };
2539
2540 Value *EVL;
2541  if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2542                     m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2543 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2544    if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2545                       m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2546 (LHS->hasOneUse() || RHS->hasOneUse() ||
2547 (LHS == RHS && LHS->hasNUses(2))))
2548 return createBinOpVPReverse(V1, V2, EVL);
2549
2550 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2551 if (LHS->hasOneUse() && isSplatValue(RHS))
2552 return createBinOpVPReverse(V1, RHS, EVL);
2553 }
2554 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2555 else if (isSplatValue(LHS) &&
2556           match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2557                          m_Value(V2), m_AllOnes(), m_Value(EVL))))
2558 return createBinOpVPReverse(LHS, V2, EVL);
2559
2560 if (Instruction *Folded =
2562 return Folded;
2563 if (Instruction *Folded =
2565 return Folded;
2566
2567 // It may not be safe to reorder shuffles and things like div, urem, etc.
2568 // because we may trap when executing those ops on unknown vector elements.
2569 // See PR20059.
2570  if (!isSafeToSpeculativelyExecute(&Inst))
2571    return nullptr;
2572
2573 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2574 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2575 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2576 BO->copyIRFlags(&Inst);
2577 return new ShuffleVectorInst(XY, M);
2578 };
2579
2580 // If both arguments of the binary operation are shuffles that use the same
2581 // mask and shuffle within a single vector, move the shuffle after the binop.
2582 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2583 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2584 V1->getType() == V2->getType() &&
2585 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2586 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2587 return createBinOpShuffle(V1, V2, Mask);
2588 }
2589
2590 // If both arguments of a commutative binop are select-shuffles that use the
2591 // same mask with commuted operands, the shuffles are unnecessary.
2592 if (Inst.isCommutative() &&
2593 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2594 match(RHS,
2595 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2596 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2597 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2598 // TODO: Allow shuffles that contain undefs in the mask?
2599 // That is legal, but it reduces undef knowledge.
2600 // TODO: Allow arbitrary shuffles by shuffling after binop?
2601 // That might be legal, but we have to deal with poison.
2602 if (LShuf->isSelect() &&
2603 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2604 RShuf->isSelect() &&
2605 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2606 // Example:
2607 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2608 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2609 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2610 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2611 NewBO->copyIRFlags(&Inst);
2612 return NewBO;
2613 }
2614 }
2615
2616 // If one argument is a shuffle within one vector and the other is a constant,
2617 // try moving the shuffle after the binary operation. This canonicalization
2618 // intends to move shuffles closer to other shuffles and binops closer to
2619 // other binops, so they can be folded. It may also enable demanded elements
2620 // transforms.
2621 Constant *C;
2622  if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2623                                                m_Mask(Mask))),
2624 m_ImmConstant(C)))) {
2625 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2626 "Shuffle should not change scalar type");
2627
2628 bool ConstOp1 = isa<Constant>(RHS);
2629 if (Constant *NewC =
2630            unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2631      // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2632 // which will cause UB for div/rem. Mask them with a safe constant.
2633 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2634 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2635
2636 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2637 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2638 Value *NewLHS = ConstOp1 ? V1 : NewC;
2639 Value *NewRHS = ConstOp1 ? NewC : V1;
2640 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2641 }
2642 }
2643
2644 // Try to reassociate to sink a splat shuffle after a binary operation.
2645 if (Inst.isAssociative() && Inst.isCommutative()) {
2646 // Canonicalize shuffle operand as LHS.
2647 if (isa<ShuffleVectorInst>(RHS))
2648 std::swap(LHS, RHS);
2649
2650 Value *X;
2651 ArrayRef<int> MaskC;
2652 int SplatIndex;
2653 Value *Y, *OtherOp;
2654 if (!match(LHS,
2655 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2656 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2657 X->getType() != Inst.getType() ||
2658 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2659 return nullptr;
2660
2661 // FIXME: This may not be safe if the analysis allows undef elements. By
2662 // moving 'Y' before the splat shuffle, we are implicitly assuming
2663 // that it is not undef/poison at the splat index.
2664 if (isSplatValue(OtherOp, SplatIndex)) {
2665 std::swap(Y, OtherOp);
2666 } else if (!isSplatValue(Y, SplatIndex)) {
2667 return nullptr;
2668 }
2669
2670 // X and Y are splatted values, so perform the binary operation on those
2671 // values followed by a splat followed by the 2nd binary operation:
2672 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2673 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2674 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2675 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2676 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2677
2678 // Intersect FMF on both new binops. Other (poison-generating) flags are
2679 // dropped to be safe.
2680 if (isa<FPMathOperator>(R)) {
2681 R->copyFastMathFlags(&Inst);
2682 R->andIRFlags(RHS);
2683 }
2684 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2685 NewInstBO->copyIRFlags(R);
2686 return R;
2687 }
2688
2689 return nullptr;
2690}
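// For instance, the shuffle-with-constant case above rewrites (illustrative
// IR):
//   %s = shufflevector <2 x i32> %v, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
//   %r = add <2 x i32> %s, <i32 10, i32 20>
// -->
//   %t = add <2 x i32> %v, <i32 20, i32 10>
//   %r = shufflevector <2 x i32> %t, <2 x i32> poison, <2 x i32> <i32 1, i32 0>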
2691
2692/// Try to narrow the width of a binop if at least 1 operand is an extend of
2693/// a value. This requires a potentially expensive known bits check to make
2694/// sure the narrow op does not overflow.
2695Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2696 // We need at least one extended operand.
2697 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2698
2699 // If this is a sub, we swap the operands since we always want an extension
2700 // on the RHS. The LHS can be an extension or a constant.
2701 if (BO.getOpcode() == Instruction::Sub)
2702 std::swap(Op0, Op1);
2703
2704 Value *X;
2705 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2706 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2707 return nullptr;
2708
2709 // If both operands are the same extension from the same source type and we
2710 // can eliminate at least one (hasOneUse), this might work.
2711 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2712 Value *Y;
2713 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2714 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2715 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2716 // If that did not match, see if we have a suitable constant operand.
2717 // Truncating and extending must produce the same constant.
2718 Constant *WideC;
2719 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2720 return nullptr;
2721 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2722 if (!NarrowC)
2723 return nullptr;
2724 Y = NarrowC;
2725 }
2726
2727 // Swap back now that we found our operands.
2728 if (BO.getOpcode() == Instruction::Sub)
2729 std::swap(X, Y);
2730
2731 // Both operands have narrow versions. Last step: the math must not overflow
2732 // in the narrow width.
2733 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2734 return nullptr;
2735
2736 // bo (ext X), (ext Y) --> ext (bo X, Y)
2737 // bo (ext X), C --> ext (bo X, C')
2738 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2739 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2740 if (IsSext)
2741 NewBinOp->setHasNoSignedWrap();
2742 else
2743 NewBinOp->setHasNoUnsignedWrap();
2744 }
2745 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2746}
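// A sketch, assuming known bits prove the narrow op cannot wrap (e.g. %x is
// known to be at most 100):
//   %xw = zext i8 %x to i32
//   %r  = add i32 %xw, 7
// -->
//   %narrow = add nuw i8 %x, 7
//   %r = zext i8 %narrow to i32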
2747
2748/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2749/// transform.
2750static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2751                                              GEPOperator &GEP2) {
2752  return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2753}
2754
2755/// Thread a GEP operation with constant indices through the constant true/false
2756/// arms of a select.
2757static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2758                                  InstCombiner::BuilderTy &Builder) {
2759 if (!GEP.hasAllConstantIndices())
2760 return nullptr;
2761
2762 Instruction *Sel;
2763 Value *Cond;
2764 Constant *TrueC, *FalseC;
2765 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2766 !match(Sel,
2767 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2768 return nullptr;
2769
2770 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2771 // Propagate 'inbounds' and metadata from existing instructions.
2772 // Note: using IRBuilder to create the constants for efficiency.
2773 SmallVector<Value *, 4> IndexC(GEP.indices());
2774 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2775 Type *Ty = GEP.getSourceElementType();
2776 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2777 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2778 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2779}
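// For example (illustrative IR with hypothetical globals @A and @B):
//   %p = select i1 %c, ptr @A, ptr @B
//   %g = getelementptr inbounds i32, ptr %p, i64 3
// -->
//   %g = select i1 %c, ptr getelementptr inbounds (i32, ptr @A, i64 3),
//                      ptr getelementptr inbounds (i32, ptr @B, i64 3)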
2780
2781// Canonicalization:
2782// gep T, (gep i8, base, C1), (Index + C2) into
2783// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
2784static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2785                                                GEPOperator *Src,
2786 InstCombinerImpl &IC) {
2787 if (GEP.getNumIndices() != 1)
2788 return nullptr;
2789 auto &DL = IC.getDataLayout();
2790 Value *Base;
2791 const APInt *C1;
2792 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2793 return nullptr;
2794 Value *VarIndex;
2795 const APInt *C2;
2796 Type *PtrTy = Src->getType()->getScalarType();
2797 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2798 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2799 return nullptr;
2800 if (C1->getBitWidth() != IndexSizeInBits ||
2801 C2->getBitWidth() != IndexSizeInBits)
2802 return nullptr;
2803 Type *BaseType = GEP.getSourceElementType();
2804  if (isa<ScalableVectorType>(BaseType))
2805    return nullptr;
2806 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2807 APInt NewOffset = TypeSize * *C2 + *C1;
2808 if (NewOffset.isZero() ||
2809 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2810    GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2811    if (GEP.hasNoUnsignedWrap() &&
2812 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2813 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2814      Flags |= GEPNoWrapFlags::noUnsignedWrap();
2815      if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2816 Flags |= GEPNoWrapFlags::inBounds();
2817 }
2818
2819 Value *GEPConst =
2820 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2821 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2822 }
2823
2824 return nullptr;
2825}
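// Concretely, with T = i32 (alloc size 4), C1 = 8 and C2 = 2 (illustrative
// IR):
//   %q   = getelementptr i8, ptr %base, i64 8
//   %idx = add i64 %i, 2
//   %g   = getelementptr i32, ptr %q, i64 %idx
// -->
//   %q2 = getelementptr i8, ptr %base, i64 16   ; 8 + 2 * 4
//   %g  = getelementptr i32, ptr %q2, i64 %i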
2826
2827/// Combine constant offsets separated by variable offsets.
2828/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
2829static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2830                                           InstCombinerImpl &IC) {
2831 if (!GEP.hasAllConstantIndices())
2832 return nullptr;
2833
2834  GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2835  SmallVector<GetElementPtrInst *> Skipped;
2836  auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2837 while (true) {
2838 if (!InnerGEP)
2839 return nullptr;
2840
2841 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2842 if (InnerGEP->hasAllConstantIndices())
2843 break;
2844
2845 if (!InnerGEP->hasOneUse())
2846 return nullptr;
2847
2848 Skipped.push_back(InnerGEP);
2849 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2850 }
2851
2852 // The two constant offset GEPs are directly adjacent: Let normal offset
2853 // merging handle it.
2854 if (Skipped.empty())
2855 return nullptr;
2856
2857 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2858 // if profitable.
2859 if (!InnerGEP->hasOneUse())
2860 return nullptr;
2861
2862 // Don't bother with vector splats.
2863 Type *Ty = GEP.getType();
2864 if (InnerGEP->getType() != Ty)
2865 return nullptr;
2866
2867 const DataLayout &DL = IC.getDataLayout();
2868 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2869 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2870 !InnerGEP->accumulateConstantOffset(DL, Offset))
2871 return nullptr;
2872
2873 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2874 for (GetElementPtrInst *SkippedGEP : Skipped)
2875 SkippedGEP->setNoWrapFlags(NW);
2876
2877 return IC.replaceInstUsesWith(
2878 GEP,
2879 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2880 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2881}
2882
2883Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2884                                             GEPOperator *Src) {
2885 // Combine Indices - If the source pointer to this getelementptr instruction
2886 // is a getelementptr instruction with matching element type, combine the
2887 // indices of the two getelementptr instructions into a single instruction.
2888 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2889 return nullptr;
2890
2891 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2892 return I;
2893
2894 if (auto *I = combineConstantOffsets(GEP, *this))
2895 return I;
2896
2897 if (Src->getResultElementType() != GEP.getSourceElementType())
2898 return nullptr;
2899
2900 // Fold chained GEP with constant base into single GEP:
2901 // gep i8, (gep i8, %base, C1), (select Cond, C2, C3)
2902 // -> gep i8, %base, (select Cond, C1+C2, C1+C3)
2903 if (Src->hasOneUse() && GEP.getNumIndices() == 1 &&
2904 Src->getNumIndices() == 1) {
2905 Value *SrcIdx = *Src->idx_begin();
2906 Value *GEPIdx = *GEP.idx_begin();
2907 const APInt *ConstOffset, *TrueVal, *FalseVal;
2908 Value *Cond;
2909
2910 if ((match(SrcIdx, m_APInt(ConstOffset)) &&
2911 match(GEPIdx,
2912 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal)))) ||
2913 (match(GEPIdx, m_APInt(ConstOffset)) &&
2914 match(SrcIdx,
2915 m_Select(m_Value(Cond), m_APInt(TrueVal), m_APInt(FalseVal))))) {
2916 auto *Select = isa<SelectInst>(GEPIdx) ? cast<SelectInst>(GEPIdx)
2917 : cast<SelectInst>(SrcIdx);
2918
2919 // Make sure the select has only one use.
2920 if (!Select->hasOneUse())
2921 return nullptr;
2922
2923 if (TrueVal->getBitWidth() != ConstOffset->getBitWidth() ||
2924 FalseVal->getBitWidth() != ConstOffset->getBitWidth())
2925 return nullptr;
2926
2927 APInt NewTrueVal = *ConstOffset + *TrueVal;
2928 APInt NewFalseVal = *ConstOffset + *FalseVal;
2929 Constant *NewTrue = ConstantInt::get(Select->getType(), NewTrueVal);
2930 Constant *NewFalse = ConstantInt::get(Select->getType(), NewFalseVal);
2931 Value *NewSelect = Builder.CreateSelect(
2932 Cond, NewTrue, NewFalse, /*Name=*/"",
2933 /*MDFrom=*/(ProfcheckDisableMetadataFixes ? nullptr : Select));
2934      GEPNoWrapFlags Flags =
2935          getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2936 return replaceInstUsesWith(GEP,
2937 Builder.CreateGEP(GEP.getResultElementType(),
2938 Src->getPointerOperand(),
2939 NewSelect, "", Flags));
2940 }
2941 }
2942
2943 // Find out whether the last index in the source GEP is a sequential idx.
2944 bool EndsWithSequential = false;
2945 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2946 I != E; ++I)
2947 EndsWithSequential = I.isSequential();
2948 if (!EndsWithSequential)
2949 return nullptr;
2950
2951 // Replace: gep (gep %P, long B), long A, ...
2952 // With: T = long A+B; gep %P, T, ...
2953 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2954 Value *GO1 = GEP.getOperand(1);
2955
2956 // If they aren't the same type, then the input hasn't been processed
2957 // by the loop above yet (which canonicalizes sequential index types to
2958 // intptr_t). Just avoid transforming this until the input has been
2959 // normalized.
2960 if (SO1->getType() != GO1->getType())
2961 return nullptr;
2962
2963 Value *Sum =
2964 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2965 // Only do the combine when we are sure the cost after the
2966 // merge is never more than that before the merge.
2967 if (Sum == nullptr)
2968 return nullptr;
2969
2971 Indices.append(Src->op_begin() + 1, Src->op_end() - 1);
2972 Indices.push_back(Sum);
2973 Indices.append(GEP.op_begin() + 2, GEP.op_end());
2974
2975 // Don't create GEPs with more than one non-zero index.
2976 unsigned NumNonZeroIndices = count_if(Indices, [](Value *Idx) {
2977 auto *C = dyn_cast<Constant>(Idx);
2978 return !C || !C->isNullValue();
2979 });
2980 if (NumNonZeroIndices > 1)
2981 return nullptr;
2982
2983 return replaceInstUsesWith(
2984 GEP, Builder.CreateGEP(
2985 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2986                 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2987}
2988
2989Value *InstCombinerImpl::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2990                                               BuilderTy *Builder,
2991                                               bool &DoesConsume, unsigned Depth) {
2992 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2993 // ~(~(X)) -> X.
2994 Value *A, *B;
2995 if (match(V, m_Not(m_Value(A)))) {
2996 DoesConsume = true;
2997 return A;
2998 }
2999
3000 Constant *C;
3001 // Constants can be considered to be not'ed values.
3002 if (match(V, m_ImmConstant(C)))
3003 return ConstantExpr::getNot(C);
3004
3004
3005  if (Depth++ >= MaxAnalysisRecursionDepth)
3006    return nullptr;
3007
3008 // The rest of the cases require that we invert all uses so don't bother
3009 // doing the analysis if we know we can't use the result.
3010 if (!WillInvertAllUses)
3011 return nullptr;
3012
3013 // Compares can be inverted if all of their uses are being modified to use
3014 // the ~V.
3015 if (auto *I = dyn_cast<CmpInst>(V)) {
3016 if (Builder != nullptr)
3017 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
3018 I->getOperand(1));
3019 return NonNull;
3020 }
3021
3022 // If `V` is of the form `A + B` then `-1 - V` can be folded into
3023 // `(-1 - B) - A` if we are willing to invert all of the uses.
3024 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
3025 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3026 DoesConsume, Depth))
3027 return Builder ? Builder->CreateSub(BV, A) : NonNull;
3028 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3029 DoesConsume, Depth))
3030 return Builder ? Builder->CreateSub(AV, B) : NonNull;
3031 return nullptr;
3032 }
3033
3034 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
3035 // into `A ^ B` if we are willing to invert all of the uses.
3036 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
3037 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3038 DoesConsume, Depth))
3039 return Builder ? Builder->CreateXor(A, BV) : NonNull;
3040 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3041 DoesConsume, Depth))
3042 return Builder ? Builder->CreateXor(AV, B) : NonNull;
3043 return nullptr;
3044 }
3045
3046 // If `V` is of the form `B - A` then `-1 - V` can be folded into
3047 // `A + (-1 - B)` if we are willing to invert all of the uses.
3048 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
3049 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3050 DoesConsume, Depth))
3051 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
3052 return nullptr;
3053 }
3054
3055 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
3056 // into `A s>> B` if we are willing to invert all of the uses.
3057 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
3058 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3059 DoesConsume, Depth))
3060 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
3061 return nullptr;
3062 }
3063
3064 Value *Cond;
3065 // LogicOps are special in that we canonicalize them at the cost of an
3066 // instruction.
3067 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
3068                  !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
3069  // Selects/min/max with invertible operands are freely invertible
3070 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
3071 bool LocalDoesConsume = DoesConsume;
3072 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
3073 LocalDoesConsume, Depth))
3074 return nullptr;
3075 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3076 LocalDoesConsume, Depth)) {
3077 DoesConsume = LocalDoesConsume;
3078 if (Builder != nullptr) {
3079 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3080 DoesConsume, Depth);
3081 assert(NotB != nullptr &&
3082 "Unable to build inverted value for known freely invertable op");
3083 if (auto *II = dyn_cast<IntrinsicInst>(V))
3084 return Builder->CreateBinaryIntrinsic(
3085 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
3086 return Builder->CreateSelect(
3087 Cond, NotA, NotB, "",
3088          ProfcheckDisableMetadataFixes ? nullptr : cast<SelectInst>(V));
3089    }
3090 return NonNull;
3091 }
3092 }
3093
3094 if (PHINode *PN = dyn_cast<PHINode>(V)) {
3095 bool LocalDoesConsume = DoesConsume;
3096    SmallVector<std::pair<Value *, BasicBlock *>> IncomingValues;
3097    for (Use &U : PN->operands()) {
3098 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3099 Value *NewIncomingVal = getFreelyInvertedImpl(
3100 U.get(), /*WillInvertAllUses=*/false,
3101 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
3102 if (NewIncomingVal == nullptr)
3103 return nullptr;
3104 // Make sure that we can safely erase the original PHI node.
3105 if (NewIncomingVal == V)
3106 return nullptr;
3107 if (Builder != nullptr)
3108 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
3109 }
3110
3111 DoesConsume = LocalDoesConsume;
3112 if (Builder != nullptr) {
3113      IRBuilderBase::InsertPointGuard Guard(*Builder);
3114      Builder->SetInsertPoint(PN);
3115 PHINode *NewPN =
3116 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3117 for (auto [Val, Pred] : IncomingValues)
3118 NewPN->addIncoming(Val, Pred);
3119 return NewPN;
3120 }
3121 return NonNull;
3122 }
3123
3124 if (match(V, m_SExtLike(m_Value(A)))) {
3125 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3126 DoesConsume, Depth))
3127 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3128 return nullptr;
3129 }
3130
3131 if (match(V, m_Trunc(m_Value(A)))) {
3132 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3133 DoesConsume, Depth))
3134 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3135 return nullptr;
3136 }
3137
3138 // De Morgan's Laws:
3139 // (~(A | B)) -> (~A & ~B)
3140 // (~(A & B)) -> (~A | ~B)
3141 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3142 bool IsLogical, Value *A,
3143 Value *B) -> Value * {
3144 bool LocalDoesConsume = DoesConsume;
3145 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3146 LocalDoesConsume, Depth))
3147 return nullptr;
3148 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3149 LocalDoesConsume, Depth)) {
3150 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3151 LocalDoesConsume, Depth);
3152 DoesConsume = LocalDoesConsume;
3153 if (IsLogical)
3154 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3155 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3156 }
3157
3158 return nullptr;
3159 };
3160
3161 if (match(V, m_Or(m_Value(A), m_Value(B))))
3162 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3163 B);
3164
3165 if (match(V, m_And(m_Value(A), m_Value(B))))
3166 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3167 B);
3168
3169 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3170 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3171 B);
3172
3173 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3174 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3175 B);
3176
3177 return nullptr;
3178}
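// For example, using the `A + B` rule above with B = ~X (a sketch):
//   ~(A + ~X) --> (-1 - ~X) - A --> X - A
// so the `not` is absorbed without creating any extra instructions.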
3179
3180/// Return true if we should canonicalize the gep to an i8 ptradd.
3181static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
3182  Value *PtrOp = GEP.getOperand(0);
3183 Type *GEPEltType = GEP.getSourceElementType();
3184 if (GEPEltType->isIntegerTy(8))
3185 return false;
3186
3187 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3188 // intrinsic. This has better support in BasicAA.
3189 if (GEPEltType->isScalableTy())
3190 return true;
3191
3192 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3193 // together.
3194 if (GEP.getNumIndices() == 1 &&
3195 match(GEP.getOperand(1),
3196            m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
3197                        m_Shl(m_Value(), m_ConstantInt()))))
3198 return true;
3199
3200 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3201 // possibly be merged together.
3202 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3203 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3204 any_of(GEP.indices(), [](Value *V) {
3205 const APInt *C;
3206 return match(V, m_APInt(C)) && !C->isZero();
3207 });
3208}
3209
3210static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3211                                 IRBuilderBase &Builder) {
3212 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3213 if (!Op1)
3214 return nullptr;
3215
3216 // Don't fold a GEP into itself through a PHI node. This can only happen
3217 // through the back-edge of a loop. Folding a GEP into itself means that
3218 // the value of the previous iteration needs to be stored in the meantime,
3219 // thus requiring an additional register variable to be live, but not
3220 // actually achieving anything (the GEP still needs to be executed once per
3221 // loop iteration).
3222 if (Op1 == &GEP)
3223 return nullptr;
3224 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3225
3226 int DI = -1;
3227
3228 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3229 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3230 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3231 Op1->getSourceElementType() != Op2->getSourceElementType())
3232 return nullptr;
3233
3234 // As for Op1 above, don't try to fold a GEP into itself.
3235 if (Op2 == &GEP)
3236 return nullptr;
3237
3238 // Keep track of the type as we walk the GEP.
3239 Type *CurTy = nullptr;
3240
3241 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3242 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3243 return nullptr;
3244
3245 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3246 if (DI == -1) {
3247 // We have not seen any differences in the GEPs feeding the
3248 // PHI yet, so we record this one if it is allowed to be a
3249 // variable.
3250
3251 // The first two arguments can vary for any GEP, the rest have to be
3252 // static for struct slots
3253 if (J > 1) {
3254 assert(CurTy && "No current type?");
3255 if (CurTy->isStructTy())
3256 return nullptr;
3257 }
3258
3259 DI = J;
3260 } else {
3261 // The GEP is different by more than one input. While this could be
3262 // extended to support GEPs that vary by more than one variable it
3263 // doesn't make sense since it greatly increases the complexity and
3264 // would result in an R+R+R addressing mode which no backend
3265 // directly supports and would need to be broken into several
3266 // simpler instructions anyway.
3267 return nullptr;
3268 }
3269 }
3270
3271 // Sink down a layer of the type for the next iteration.
3272 if (J > 0) {
3273 if (J == 1) {
3274 CurTy = Op1->getSourceElementType();
3275 } else {
3276 CurTy =
3277 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3278 }
3279 }
3280 }
3281
3282 NW &= Op2->getNoWrapFlags();
3283 }
3284
3285 // If not all GEPs are identical we'll have to create a new PHI node.
3286 // Check that the old PHI node has only one use so that it will get
3287 // removed.
3288 if (DI != -1 && !PN->hasOneUse())
3289 return nullptr;
3290
3291 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3292 NewGEP->setNoWrapFlags(NW);
3293
3294 if (DI == -1) {
3295 // All the GEPs feeding the PHI are identical. Clone one down into our
3296 // BB so that it can be merged with the current GEP.
3297 } else {
3298 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3299 // into the current block so it can be merged, and create a new PHI to
3300 // set that index.
3301 PHINode *NewPN;
3302 {
3303 IRBuilderBase::InsertPointGuard Guard(Builder);
3304 Builder.SetInsertPoint(PN);
3305 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3306 PN->getNumOperands());
3307 }
3308
3309 for (auto &I : PN->operands())
3310 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3311 PN->getIncomingBlock(I));
3312
3313 NewGEP->setOperand(DI, NewPN);
3314 }
3315
3316 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3317 return NewGEP;
3318}
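// Editor's illustrative sketch (not part of the original source): when the
// incoming geps differ only in one index (DI != -1), e.g.
//   %g1 = getelementptr i32, ptr %base, i64 %i1   ; in %bb1
//   %g2 = getelementptr i32, ptr %base, i64 %i2   ; in %bb2
//   %p  = phi ptr [ %g1, %bb1 ], [ %g2, %bb2 ]
// the fold produces a phi over the differing index instead:
//   %idx = phi i64 [ %i1, %bb1 ], [ %i2, %bb2 ]
//   %p   = getelementptr i32, ptr %base, i64 %idx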
3319
3320 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3321 Value *PtrOp = GEP.getOperand(0);
3322 SmallVector<Value *, 8> Indices(GEP.indices());
3323 Type *GEPType = GEP.getType();
3324 Type *GEPEltType = GEP.getSourceElementType();
3325 if (Value *V =
3326 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3327 SQ.getWithInstruction(&GEP)))
3328 return replaceInstUsesWith(GEP, V);
3329
3330 // For vector geps, use the generic demanded vector support.
3331 // Skip if GEP return type is scalable. The number of elements is unknown at
3332 // compile-time.
3333 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3334 auto VWidth = GEPFVTy->getNumElements();
3335 APInt PoisonElts(VWidth, 0);
3336 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3337 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3338 PoisonElts)) {
3339 if (V != &GEP)
3340 return replaceInstUsesWith(GEP, V);
3341 return &GEP;
3342 }
3343 }
3344
3345 // Eliminate unneeded casts for indices, and replace indices which displace
3346 // by multiples of a zero size type with zero.
3347 bool MadeChange = false;
3348
3349 // Index width may not be the same width as pointer width.
3350 // Data layout chooses the right type based on supported integer types.
3351 Type *NewScalarIndexTy =
3352 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3353
3355 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3356 ++I, ++GTI) {
3357 // Skip indices into struct types.
3358 if (GTI.isStruct())
3359 continue;
3360
3361 Type *IndexTy = (*I)->getType();
3362 Type *NewIndexType =
3363 IndexTy->isVectorTy()
3364 ? VectorType::get(NewScalarIndexTy,
3365 cast<VectorType>(IndexTy)->getElementCount())
3366 : NewScalarIndexTy;
3367
3368 // If the element type has zero size then any index over it is equivalent
3369 // to an index of zero, so replace it with zero if it is not zero already.
3370 Type *EltTy = GTI.getIndexedType();
3371 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3372 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3373 *I = Constant::getNullValue(NewIndexType);
3374 MadeChange = true;
3375 }
3376
3377 if (IndexTy != NewIndexType) {
3378 // If we are using a wider index than needed for this platform, shrink
3379 // it to what we need. If narrower, sign-extend it to what we need.
3380 // This explicit cast can make subsequent optimizations more obvious.
3381 if (IndexTy->getScalarSizeInBits() <
3382 NewIndexType->getScalarSizeInBits()) {
3383 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3384 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3385 else
3386 *I = Builder.CreateSExt(*I, NewIndexType);
3387 } else {
3388 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3389 GEP.hasNoUnsignedSignedWrap());
3390 }
3391 MadeChange = true;
3392 }
3393 }
3394 if (MadeChange)
3395 return &GEP;
3396
3397 // Canonicalize constant GEPs to i8 type.
3398 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3399 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3400 if (GEP.accumulateConstantOffset(DL, Offset))
3401 return replaceInstUsesWith(
3402 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3403 GEP.getNoWrapFlags()));
3404 }
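// Editor's illustrative sketch (not part of the original source): with a
// 4-byte i32 element type, the all-constant gep
//   %gep = getelementptr inbounds [4 x i32], ptr %p, i64 0, i64 2
// is folded to a single byte offset:
//   %gep = getelementptr inbounds i8, ptr %p, i64 8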
3405
3406 if (shouldCanonicalizeGEPToPtrAdd(GEP)) {
3407 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3408 Value *NewGEP =
3409 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3410 return replaceInstUsesWith(GEP, NewGEP);
3411 }
3412
3413 // Strip trailing zero indices.
3414 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3415 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3416 return replaceInstUsesWith(
3417 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3418 drop_end(Indices), "", GEP.getNoWrapFlags()));
3419 }
3420
3421 // Strip leading zero indices.
3422 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3423 if (FirstIdx && FirstIdx->isNullValue() &&
3424 !FirstIdx->getType()->isVectorTy()) {
3425 gep_type_iterator GTI = gep_type_begin(GEP);
3426 ++GTI;
3427 if (!GTI.isStruct() && GTI.getSequentialElementStride(DL) ==
3428 DL.getTypeAllocSize(GTI.getIndexedType()))
3429 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3430 GEP.getPointerOperand(),
3431 drop_begin(Indices), "",
3432 GEP.getNoWrapFlags()));
3433 }
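// Editor's illustrative sketch (not part of the original source): the
// leading zero index is dropped when the outer stride equals the inner
// element size, e.g.
//   %gep = getelementptr [2 x i32], ptr %p, i64 0, i64 %i
// becomes
//   %gep = getelementptr i32, ptr %p, i64 %i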
3434
3435 // Scalarize vector operands; prefer splat-of-gep as canonical form.
3436 // Note that this loses information about undef lanes; we run it after
3437 // demanded bits to partially mitigate that loss.
3438 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3439 return Op->getType()->isVectorTy() && getSplatValue(Op);
3440 })) {
3441 SmallVector<Value *> NewOps;
3442 for (auto &Op : GEP.operands()) {
3443 if (Op->getType()->isVectorTy())
3444 if (Value *Scalar = getSplatValue(Op)) {
3445 NewOps.push_back(Scalar);
3446 continue;
3447 }
3448 NewOps.push_back(Op);
3449 }
3450
3451 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3452 ArrayRef(NewOps).drop_front(), GEP.getName(),
3453 GEP.getNoWrapFlags());
3454 if (!Res->getType()->isVectorTy()) {
3455 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3456 Res = Builder.CreateVectorSplat(EC, Res);
3457 }
3458 return replaceInstUsesWith(GEP, Res);
3459 }
3460
3461 bool SeenNonZeroIndex = false;
3462 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3463 // Ignore one leading zero index.
3464 auto *C = dyn_cast<Constant>(Idx);
3465 if (C && C->isNullValue() && IdxNum == 0)
3466 continue;
3467
3468 if (!SeenNonZeroIndex) {
3469 SeenNonZeroIndex = true;
3470 continue;
3471 }
3472
3473 // GEP has multiple non-zero indices: Split it.
3474 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3475 Value *FrontGEP =
3476 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3477 GEP.getName() + ".split", GEP.getNoWrapFlags());
3478
3479 SmallVector<Value *> BackIndices;
3480 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3481 append_range(BackIndices, drop_begin(Indices, IdxNum));
3482 return GetElementPtrInst::Create(
3483 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3484 BackIndices, GEP.getNoWrapFlags());
3485 }
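// Editor's illustrative sketch (not part of the original source): a gep
// with two variable (non-zero) indices is split so each half can combine
// with neighboring instructions, e.g.
//   %gep = getelementptr [4 x i32], ptr %p, i64 %a, i64 %b
// becomes
//   %gep.split = getelementptr [4 x i32], ptr %p, i64 %a
//   %gep       = getelementptr [4 x i32], ptr %gep.split, i64 0, i64 %b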
3486
3487 // Canonicalize gep %T to gep [sizeof(%T) x i8]:
3488 auto IsCanonicalType = [](Type *Ty) {
3489 if (auto *AT = dyn_cast<ArrayType>(Ty))
3490 Ty = AT->getElementType();
3491 return Ty->isIntegerTy(8);
3492 };
3493 if (Indices.size() == 1 && !IsCanonicalType(GEPEltType)) {
3494 TypeSize Scale = DL.getTypeAllocSize(GEPEltType);
3495 assert(!Scale.isScalable() && "Should have been handled earlier");
3496 Type *NewElemTy = Builder.getInt8Ty();
3497 if (Scale.getFixedValue() != 1)
3498 NewElemTy = ArrayType::get(NewElemTy, Scale.getFixedValue());
3499 GEP.setSourceElementType(NewElemTy);
3500 GEP.setResultElementType(NewElemTy);
3501 // Don't bother revisiting the GEP after this change.
3502 MadeIRChange = true;
3503 }
3504
3505 // Check to see if the inputs to the PHI node are getelementptr instructions.
3506 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3507 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3508 return replaceOperand(GEP, 0, NewPtrOp);
3509 }
3510
3511 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3512 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3513 return I;
3514
3515 if (GEP.getNumIndices() == 1) {
3516 unsigned AS = GEP.getPointerAddressSpace();
3517 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3518 DL.getIndexSizeInBits(AS)) {
3519 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3520
3521 if (TyAllocSize == 1) {
3522 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3523 // but only if the result pointer is only used as if it were an integer.
3524 // (The case where the underlying object is the same is handled by
3525 // InstSimplify.)
3526 Value *X = GEP.getPointerOperand();
3527 Value *Y;
3528 if (match(GEP.getOperand(1), m_Sub(m_PtrToIntOrAddr(m_Value(Y)),
3529 m_PtrToIntOrAddr(m_Specific(X)))) &&
3530 GEPType == Y->getType()) {
3531 bool HasNonAddressBits =
3532 DL.getAddressSizeInBits(AS) != DL.getPointerSizeInBits(AS);
3533 bool Changed = GEP.replaceUsesWithIf(Y, [&](Use &U) {
3534 return isa<PtrToAddrInst, ICmpInst>(U.getUser()) ||
3535 (!HasNonAddressBits && isa<PtrToIntInst>(U.getUser()));
3536 });
3537 return Changed ? &GEP : nullptr;
3538 }
3539 } else if (auto *ExactIns =
3540 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3541 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
3542 Value *V;
3543 if (ExactIns->isExact()) {
3544 if ((has_single_bit(TyAllocSize) &&
3545 match(GEP.getOperand(1),
3546 m_Shr(m_Value(V),
3547 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3548 match(GEP.getOperand(1),
3549 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3550 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3551 GEP.getPointerOperand(), V,
3552 GEP.getNoWrapFlags());
3553 }
3554 }
3555 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3556 // Try to canonicalize non-i8 element type to i8 if the index is an
3557 // exact instruction. If the index is an exact instruction (div/shr)
3558 // with a constant RHS, we can fold the non-i8 element scale into the
3559 // div/shr (similar to the mul case, just inverted).
3560 const APInt *C;
3561 std::optional<APInt> NewC;
3562 if (has_single_bit(TyAllocSize) &&
3563 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3564 C->uge(countr_zero(TyAllocSize)))
3565 NewC = *C - countr_zero(TyAllocSize);
3566 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3567 APInt Quot;
3568 uint64_t Rem;
3569 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3570 if (Rem == 0)
3571 NewC = Quot;
3572 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3573 APInt Quot;
3574 int64_t Rem;
3575 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3576 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3577 if (!Quot.isAllOnes() && Rem == 0)
3578 NewC = Quot;
3579 }
3580
3581 if (NewC.has_value()) {
3582 Value *NewOp = Builder.CreateBinOp(
3583 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3584 ConstantInt::get(V->getType(), *NewC));
3585 cast<BinaryOperator>(NewOp)->setIsExact();
3586 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3587 GEP.getPointerOperand(), NewOp,
3588 GEP.getNoWrapFlags());
3589 }
3590 }
3591 }
3592 }
3593 }
3594 // We do not handle pointer-vector geps here.
3595 if (GEPType->isVectorTy())
3596 return nullptr;
3597
3598 if (!GEP.isInBounds()) {
3599 unsigned IdxWidth =
3600 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3601 APInt BasePtrOffset(IdxWidth, 0);
3602 Value *UnderlyingPtrOp =
3603 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3604 bool CanBeNull, CanBeFreed;
3605 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3606 DL, CanBeNull, CanBeFreed);
3607 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3608 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3609 BasePtrOffset.isNonNegative()) {
3610 APInt AllocSize(IdxWidth, DerefBytes);
3611 if (BasePtrOffset.ule(AllocSize)) {
3612 return GetElementPtrInst::CreateInBounds(
3613 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3614 }
3615 }
3616 }
3617 }
3618
3619 // nusw + nneg -> nuw
3620 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3621 all_of(GEP.indices(), [&](Value *Idx) {
3622 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3623 })) {
3624 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3625 return &GEP;
3626 }
3627
3628 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3629 // to do this after having tried to derive "nuw" above.
3630 if (GEP.getNumIndices() == 1) {
3631 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3632 // geps if transforming into (gep (gep p, x), y).
3633 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3634 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3635 // that x + y does not have unsigned wrap.
3636 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3637 return GEP.getNoWrapFlags();
3638 return GEPNoWrapFlags::none();
3639 };
3640
3641 // Try to replace ADD + GEP with GEP + GEP.
3642 Value *Idx1, *Idx2;
3643 if (match(GEP.getOperand(1),
3644 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3645 // %idx = add i64 %idx1, %idx2
3646 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3647 // as:
3648 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3649 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3650 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3651 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3652 auto *NewPtr =
3653 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3654 Idx1, "", NWFlags);
3655 return replaceInstUsesWith(GEP,
3656 Builder.CreateGEP(GEP.getSourceElementType(),
3657 NewPtr, Idx2, "", NWFlags));
3658 }
3659 ConstantInt *C;
3660 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3661 m_Value(Idx1), m_ConstantInt(C))))))) {
3662 // %add = add nsw i32 %idx1, idx2
3663 // %sidx = sext i32 %add to i64
3664 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3665 // as:
3666 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3667 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3668 bool NUW = match(GEP.getOperand(1),
3669 m_NNegZExt(m_NUWAddLike(m_Value(), m_Value())));
3670 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3671 auto *NewPtr = Builder.CreateGEP(
3672 GEP.getSourceElementType(), GEP.getPointerOperand(),
3673 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3674 return replaceInstUsesWith(
3675 GEP,
3676 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3677 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3678 "", NWFlags));
3679 }
3680 }
3681
3682 if (Instruction *R = foldSelectGEP(GEP, Builder))
3683 return R;
3684
3685 // srem -> (and/urem) for inbounds+nuw GEP
3686 if (Indices.size() == 1 && GEP.isInBounds() && GEP.hasNoUnsignedWrap()) {
3687 Value *X, *Y;
3688
3689 // Match: idx = srem X, Y -- where Y is a power-of-two value.
3690 if (match(Indices[0], m_OneUse(m_SRem(m_Value(X), m_Value(Y)))) &&
3691 isKnownToBeAPowerOfTwo(Y, /*OrZero=*/true, &GEP)) {
3692 // If GEP is inbounds+nuw, the offset cannot be negative
3693 // -> srem by power-of-two can be treated as urem,
3694 // and urem by power-of-two folds to 'and' later.
3695 // OrZero=true is fine here because division by zero is UB.
3696 Instruction *OldIdxI = cast<Instruction>(Indices[0]);
3697 Value *NewIdx = Builder.CreateURem(X, Y, OldIdxI->getName());
3698
3699 return GetElementPtrInst::Create(GEPEltType, PtrOp, {NewIdx},
3700 GEP.getNoWrapFlags());
3701 }
3702 }
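// Editor's illustrative sketch (not part of the original source): because
// an inbounds+nuw gep implies a non-negative index,
//   %idx = srem i64 %x, 8
//   %gep = getelementptr inbounds nuw i32, ptr %p, i64 %idx
// can use urem instead, which later folds to a mask:
//   %idx = urem i64 %x, 8   ; later becomes 'and i64 %x, 7'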
3703
3704 return nullptr;
3705}
3706
3707 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3708 Instruction *AI) {
3709 if (isa<ConstantPointerNull>(V))
3710 return true;
3711 if (auto *LI = dyn_cast<LoadInst>(V))
3712 return isa<GlobalVariable>(LI->getPointerOperand());
3713 // Two distinct allocations will never be equal.
3714 return isAllocLikeFn(V, &TLI) && V != AI;
3715}
3716
3717/// Given a call CB which uses an address UsedV, return true if we can prove the
3718 /// call's only possible effect is storing to UsedV.
3719static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3720 const TargetLibraryInfo &TLI) {
3721 if (!CB.use_empty())
3722 // TODO: add recursion if returned attribute is present
3723 return false;
3724
3725 if (CB.isTerminator())
3726 // TODO: remove implementation restriction
3727 return false;
3728
3729 if (!CB.willReturn() || !CB.doesNotThrow())
3730 return false;
3731
3732 // If the only possible side effect of the call is writing to the alloca,
3733 // and the result isn't used, we can safely remove any reads implied by the
3734 // call including those which might read the alloca itself.
3735 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3736 return Dest && Dest->Ptr == UsedV;
3737}
3738
3739static std::optional<ModRefInfo>
3740 isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<Instruction *> &Users,
3741 const TargetLibraryInfo &TLI, bool KnowInit) {
3742 SmallVector<Instruction *, 4> Worklist;
3743 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3744 Worklist.push_back(AI);
3745 ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Ref;
3746
3747 do {
3748 Instruction *PI = Worklist.pop_back_val();
3749 for (User *U : PI->users()) {
3750 Instruction *I = cast<Instruction>(U);
3751 if (Users.size() >= MaxAllocSiteRemovableUsers)
3752 return std::nullopt;
3753 switch (I->getOpcode()) {
3754 default:
3755 // Give up the moment we see something we can't handle.
3756 return std::nullopt;
3757
3758 case Instruction::AddrSpaceCast:
3759 case Instruction::BitCast:
3760 case Instruction::GetElementPtr:
3761 Users.emplace_back(I);
3762 Worklist.push_back(I);
3763 continue;
3764
3765 case Instruction::ICmp: {
3766 ICmpInst *ICI = cast<ICmpInst>(I);
3767 // We can fold eq/ne comparisons with null to false/true, respectively.
3768 // We also fold comparisons in some conditions provided the alloc has
3769 // not escaped (see isNeverEqualToUnescapedAlloc).
3770 if (!ICI->isEquality())
3771 return std::nullopt;
3772 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3773 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3774 return std::nullopt;
3775
3776 // Do not fold compares to aligned_alloc calls, as they may have to
3777 // return null in case the required alignment cannot be satisfied,
3778 // unless we can prove that both alignment and size are valid.
3779 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3780 // Check if alignment and size of a call to aligned_alloc is valid,
3781 // that is alignment is a power-of-2 and the size is a multiple of the
3782 // alignment.
3783 const APInt *Alignment;
3784 const APInt *Size;
3785 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3786 match(CB->getArgOperand(1), m_APInt(Size)) &&
3787 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3788 };
3789 auto *CB = dyn_cast<CallBase>(AI);
3790 LibFunc TheLibFunc;
3791 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3792 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3793 !AlignmentAndSizeKnownValid(CB))
3794 return std::nullopt;
3795 Users.emplace_back(I);
3796 continue;
3797 }
3798
3799 case Instruction::Call:
3800 // Ignore no-op and store intrinsics.
3801 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3802 switch (II->getIntrinsicID()) {
3803 default:
3804 return std::nullopt;
3805
3806 case Intrinsic::memmove:
3807 case Intrinsic::memcpy:
3808 case Intrinsic::memset: {
3809 MemIntrinsic *MI = cast<MemIntrinsic>(II);
3810 if (MI->isVolatile())
3811 return std::nullopt;
3812 // Note: this could also be ModRef, but we can still interpret that
3813 // as just Mod in that case.
3814 ModRefInfo NewAccess =
3815 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3816 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3817 return std::nullopt;
3818 Access |= NewAccess;
3819 [[fallthrough]];
3820 }
3821 case Intrinsic::assume:
3822 case Intrinsic::invariant_start:
3823 case Intrinsic::invariant_end:
3824 case Intrinsic::lifetime_start:
3825 case Intrinsic::lifetime_end:
3826 case Intrinsic::objectsize:
3827 Users.emplace_back(I);
3828 continue;
3829 case Intrinsic::launder_invariant_group:
3830 case Intrinsic::strip_invariant_group:
3831 Users.emplace_back(I);
3832 Worklist.push_back(I);
3833 continue;
3834 }
3835 }
3836
3837 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3838 getAllocationFamily(I, &TLI) == Family) {
3839 Users.emplace_back(I);
3840 continue;
3841 }
3842
3843 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3844 getAllocationFamily(I, &TLI) == Family) {
3845 Users.emplace_back(I);
3846 Worklist.push_back(I);
3847 continue;
3848 }
3849
3850 if (!isRefSet(Access) &&
3851 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3852 Access |= ModRefInfo::Mod;
3853 Users.emplace_back(I);
3854 continue;
3855 }
3856
3857 return std::nullopt;
3858
3859 case Instruction::Store: {
3860 StoreInst *SI = cast<StoreInst>(I);
3861 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3862 return std::nullopt;
3863 if (isRefSet(Access))
3864 return std::nullopt;
3865 Access |= ModRefInfo::Mod;
3866 Users.emplace_back(I);
3867 continue;
3868 }
3869
3870 case Instruction::Load: {
3871 LoadInst *LI = cast<LoadInst>(I);
3872 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3873 return std::nullopt;
3874 if (isModSet(Access))
3875 return std::nullopt;
3876 Access |= ModRefInfo::Ref;
3877 Users.emplace_back(I);
3878 continue;
3879 }
3880 }
3881 llvm_unreachable("missing a return?");
3882 }
3883 } while (!Worklist.empty());
3884
3885 assert(Access != ModRefInfo::ModRef);
3886 return Access;
3887}
3888
3890 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3891
3892 // If we have a malloc call which is used only in comparisons to null and in
3893 // free calls, delete the calls and replace the comparisons with true or
3894 // false as appropriate.
3895
3896 // This is based on the principle that we can substitute our own allocation
3897 // function (which will never return null) rather than knowledge of the
3898 // specific function being called. In some sense this can change the permitted
3899 // outputs of a program (when we convert a malloc to an alloca, the fact that
3900 // the allocation is now on the stack is potentially visible, for example),
3901 // but we believe it does so in a permissible manner.
3902 //
3903 // Collect into Instruction* first to avoid expensive WeakTrackingVH
3904 // register/unregister overhead; convert to WeakTrackingVH only when the
3905 // site is actually removable.
3906 SmallVector<Instruction *, 8> RawUsers;
3907
3908 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3909 // before each store.
3910 SmallVector<DbgVariableRecord *, 8> DVRs;
3911 std::unique_ptr<DIBuilder> DIB;
3912 if (isa<AllocaInst>(MI)) {
3913 findDbgUsers(&MI, DVRs);
3914 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3915 }
3916
3917 // Determine what getInitialValueOfAllocation would return without actually
3918 // allocating the result.
3919 bool KnowInitUndef = false;
3920 bool KnowInitZero = false;
3921 Constant *Init =
3922 getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3923 if (Init) {
3924 if (isa<UndefValue>(Init))
3925 KnowInitUndef = true;
3926 else if (Init->isNullValue())
3927 KnowInitZero = true;
3928 }
3929 // The various sanitizers don't actually return undef memory, but rather
3930 // memory initialized with special forms of runtime poison.
3931 auto &F = *MI.getFunction();
3932 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3933 F.hasFnAttribute(Attribute::SanitizeAddress))
3934 KnowInitUndef = false;
3935
3936 auto Removable =
3937 isAllocSiteRemovable(&MI, RawUsers, TLI, KnowInitZero | KnowInitUndef);
3938 if (Removable) {
3939 SmallVector<WeakTrackingVH, 64> Users(RawUsers.begin(), RawUsers.end());
3940 for (WeakTrackingVH &User : Users) {
3941 // Lowering all @llvm.objectsize and MTI calls first because they may use
3942 // a bitcast/GEP of the alloca we are removing.
3943 if (!User)
3944 continue;
3945
3946 Instruction *I = cast<Instruction>(&*User);
3947
3948 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3949 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3950 SmallVector<Instruction *> InsertedInstructions;
3951 Value *Result = lowerObjectSizeCall(
3952 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3953 for (Instruction *Inserted : InsertedInstructions)
3954 Worklist.add(Inserted);
3955 replaceInstUsesWith(*I, Result);
3956 eraseInstFromFunction(*I);
3957 User = nullptr; // Skip examining in the next loop.
3958 continue;
3959 }
3960 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3961 if (KnowInitZero && isRefSet(*Removable)) {
3962 IRBuilderBase::InsertPointGuard Guard(Builder);
3963 Builder.SetInsertPoint(MTI);
3964 auto *M = Builder.CreateMemSet(
3965 MTI->getRawDest(),
3966 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3967 MTI->getLength(), MTI->getDestAlign());
3968 M->copyMetadata(*MTI);
3969 }
3970 }
3971 }
3972 }
3973 for (WeakTrackingVH &User : Users) {
3974 if (!User)
3975 continue;
3976
3977 Instruction *I = cast<Instruction>(&*User);
3978
3979 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
3980 replaceInstUsesWith(
3981 *C, ConstantInt::get(C->getType(), C->isFalseWhenEqual()));
3982 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3983 for (auto *DVR : DVRs)
3984 if (DVR->isAddressOfVariable())
3985 ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3986 } else {
3987 // Casts, GEP, or anything else: we're about to delete this instruction,
3988 // so it can not have any valid uses.
3989 Value *Replace;
3990 if (isa<LoadInst>(I)) {
3991 assert(KnowInitZero || KnowInitUndef);
3992 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3993 : Constant::getNullValue(I->getType());
3994 } else
3995 Replace = PoisonValue::get(I->getType());
3996 replaceInstUsesWith(*I, Replace);
3997 }
3998 eraseInstFromFunction(*I);
3999 }
4000
4001 if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
4002 // Replace invoke with a NOP intrinsic to maintain the original CFG
4003 Module *M = II->getModule();
4004 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
4005 auto *NewII = InvokeInst::Create(
4006 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
4007 NewII->setDebugLoc(II->getDebugLoc());
4008 }
4009
4010 // Remove debug intrinsics which describe the value contained within the
4011 // alloca. In addition to removing dbg.{declare,addr} which simply point to
4012 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
4013 //
4014 // ```
4015 // define void @foo(i32 %0) {
4016 // %a = alloca i32 ; Deleted.
4017 // store i32 %0, i32* %a
4018 // dbg.value(i32 %0, "arg0") ; Not deleted.
4019 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
4020 // call void @trivially_inlinable_no_op(i32* %a)
4021 // ret void
4022 // }
4023 // ```
4024 //
4025 // This may not be required if we stop describing the contents of allocas
4026 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
4027 // the LowerDbgDeclare utility.
4028 //
4029 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
4030 // "arg0" dbg.value may be stale after the call. However, failing to remove
4031 // the DW_OP_deref dbg.value causes large gaps in location coverage.
4032 //
4033 // FIXME: the Assignment Tracking project has now likely made this
4034 // redundant (and it's sometimes harmful).
4035 for (auto *DVR : DVRs)
4036 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
4037 DVR->eraseFromParent();
4038
4039 return eraseInstFromFunction(MI);
4040 }
4041 return nullptr;
4042}
4043
4044/// Move the call to free before a NULL test.
4045///
4046 /// Check if this free is accessed after its argument has been tested
4047 /// against NULL (property 0).
4048 /// If yes, it is legal to move this call into its predecessor block.
4049///
4050/// The move is performed only if the block containing the call to free
4051/// will be removed, i.e.:
4052/// 1. it has only one predecessor P, and P has two successors
4053/// 2. it contains the call, noops, and an unconditional branch
4054/// 3. its successor is the same as its predecessor's successor
4055///
4056 /// Profitability is not a concern here; this function should be called
4057 /// only if the caller knows this transformation would be profitable
4058 /// (e.g., for code size).
4059 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
4060 const DataLayout &DL) {
4061 Value *Op = FI.getArgOperand(0);
4062 BasicBlock *FreeInstrBB = FI.getParent();
4063 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
4064
4065 // Validate part of constraint #1: Only one predecessor
4066 // FIXME: We could handle more than one predecessor, but in that case we
4067 // would duplicate the call to free in each predecessor, and that may
4068 // not be profitable even for code size.
4069 if (!PredBB)
4070 return nullptr;
4071
4072 // Validate constraint #2: Does this block contain only the call to
4073 // free, noops, and an unconditional branch?
4074 BasicBlock *SuccBB;
4075 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
4076 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
4077 return nullptr;
4078
4079 // If there are only 2 instructions in the block, at this point they
4080 // must be the call to free and the unconditional branch.
4081 // If there are more than 2 instructions, check that they are noops
4082 // i.e., they won't hurt the performance of the generated code.
4083 if (FreeInstrBB->size() != 2) {
4084 for (const Instruction &Inst : *FreeInstrBB) {
4085 if (&Inst == &FI || &Inst == FreeInstrBBTerminator ||
4086 Inst.isDebugOrPseudoInst())
4087 continue;
4088 auto *Cast = dyn_cast<CastInst>(&Inst);
4089 if (!Cast || !Cast->isNoopCast(DL))
4090 return nullptr;
4091 }
4092 }
4093 // Validate the rest of constraint #1 by matching on the pred branch.
4094 Instruction *TI = PredBB->getTerminator();
4095 BasicBlock *TrueBB, *FalseBB;
4096 CmpPredicate Pred;
4097 if (!match(TI, m_Br(m_ICmp(Pred,
4098 m_CombineOr(m_Specific(Op),
4099 m_Specific(Op->stripPointerCasts())),
4100 m_Zero()),
4101 TrueBB, FalseBB)))
4102 return nullptr;
4103 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
4104 return nullptr;
4105
4106 // Validate constraint #3: Ensure the null case just falls through.
4107 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
4108 return nullptr;
4109 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
4110 "Broken CFG: missing edge from predecessor to successor");
4111
4112 // At this point, we know that everything in FreeInstrBB can be moved
4113 // before TI.
4114 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
4115 if (&Instr == FreeInstrBBTerminator)
4116 break;
4117 Instr.moveBeforePreserving(TI->getIterator());
4118 }
4119 assert(FreeInstrBB->size() == 1 &&
4120 "Only the branch instruction should remain");
4121
4122 // Now that we've moved the call to free before the NULL check, we have to
4123 // remove any attributes on its parameter that imply it's non-null, because
4124 // those attributes might have only been valid because of the NULL check, and
4125 // we can get miscompiles if we keep them. This is conservative if non-null is
4126 // also implied by something other than the NULL check, but it's guaranteed to
4127 // be correct, and the conservativeness won't matter in practice, since the
4128 // attributes are irrelevant for the call to free itself and the pointer
4129 // shouldn't be used after the call.
4130 AttributeList Attrs = FI.getAttributes();
4131 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
4132 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4133 if (Dereferenceable.isValid()) {
4134 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
4135 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
4136 Attribute::Dereferenceable);
4137 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
4138 }
4139 FI.setAttributes(Attrs);
4140
4141 return &FI;
4142}
4143
4144 Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
4145 // free undef -> unreachable.
4146 if (isa<UndefValue>(Op)) {
4147 // Leave a marker since we can't modify the CFG here.
4148 CreateNonTerminatorUnreachable(&FI);
4149 return eraseInstFromFunction(FI);
4150 }
4151
4152 // If we have 'free null' delete the instruction. This can happen in stl code
4153 // when lots of inlining happens.
4154 if (isa<ConstantPointerNull>(Op))
4155 return eraseInstFromFunction(FI);
4156
4157 // If we had free(realloc(...)) with no intervening uses, then eliminate the
4158 // realloc() entirely.
4159 CallInst *CI = dyn_cast<CallInst>(Op);
4160 if (CI && CI->hasOneUse())
4161 if (Value *ReallocatedOp = getReallocatedOperand(CI))
4162 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
4163
4164 // If we optimize for code size, try to move the call to free before the null
4165 // test so that simplify cfg can remove the empty block and dead code
4166 // elimination the branch. I.e., helps to turn something like:
4167 // if (foo) free(foo);
4168 // into
4169 // free(foo);
4170 //
4171 // Note that we can only do this for 'free' and not for any flavor of
4172 // 'operator delete'; there is no 'operator delete' symbol for which we are
4173 // permitted to invent a call, even if we're passing in a null pointer.
4174 if (MinimizeSize) {
4175 LibFunc Func;
4176 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
4177 if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
4178 return I;
4179 }
4180
4181 return nullptr;
4182}
4183
4184 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
4185 Value *RetVal = RI.getReturnValue();
4186 if (!RetVal)
4187 return nullptr;
4188
4189 Function *F = RI.getFunction();
4190 Type *RetTy = RetVal->getType();
4191 if (RetTy->isPointerTy()) {
4192 bool HasDereferenceable =
4193 F->getAttributes().getRetDereferenceableBytes() > 0;
4194 if (F->hasRetAttribute(Attribute::NonNull) ||
4195 (HasDereferenceable &&
4196 !NullPointerIsDefined(F, RetTy->getPointerAddressSpace()))) {
4197 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4198 return replaceOperand(RI, 0, V);
4199 }
4200 }
4201
4202 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4203 return nullptr;
4204
4205 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4206 if (ReturnClass == fcNone)
4207 return nullptr;
4208
4209 KnownFPClass KnownClass;
4210 if (SimplifyDemandedFPClass(&RI, 0, ~ReturnClass, KnownClass,
4211 SQ.getWithInstruction(&RI)))
4212 return &RI;
4213
4214 return nullptr;
4215}
4216
4217// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
4218 bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
4219 // Try to remove the previous instruction if it must lead to unreachable.
4220 // This includes instructions like stores and "llvm.assume" that may not get
4221 // removed by simple dead code elimination.
4222 bool Changed = false;
4223 while (Instruction *Prev = I.getPrevNode()) {
4224 // While we theoretically can erase EH, that would result in a block that
4225 // used to start with an EH no longer starting with EH, which is invalid.
4226 // To make it valid, we'd need to fixup predecessors to no longer refer to
4227 // this block, but that changes CFG, which is not allowed in InstCombine.
4228 if (Prev->isEHPad())
4229 break; // Can not drop any more instructions. We're done here.
4230
4231 if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
4232 break; // Can not drop any more instructions. We're done here.
4233 // Otherwise, this instruction can be freely erased,
4234 // even if it is not side-effect free.
4235
4236 // A value may still have uses before we process it here (for example, in
4237 // another unreachable block), so convert those to poison.
4238 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4239 eraseInstFromFunction(*Prev);
4240 Changed = true;
4241 }
4242 return Changed;
4243}
4244
4245 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
4246 removeInstructionsBeforeUnreachable(I);
4247 return nullptr;
4248 }
4249
4250 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4251 // If this store is the second-to-last instruction in the basic block
4252 // (excluding debug info) and if the block ends with
4253 // an unconditional branch, try to move the store to the successor block.
4254
4255 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4256 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4257 do {
4258 if (BBI != FirstInstr)
4259 --BBI;
4260 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4261
4262 return dyn_cast<StoreInst>(BBI);
4263 };
4264
4265 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4266 if (mergeStoreIntoSuccessor(*SI))
4267 return &BI;
4268
4269 return nullptr;
4270}
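// Editor's illustrative sketch (not part of the original source): for
//   store i32 %v, ptr %p
//   br label %succ
// mergeStoreIntoSuccessor may sink the store into %succ (e.g. when the
// other predecessor also stores to %p), leaving one store of a
// phi-selected value in the successor.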
4271
4272 void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
4273 SmallVectorImpl<BasicBlock *> &Worklist) {
4274 if (!DeadEdges.insert({From, To}).second)
4275 return;
4276
4277 // Replace phi node operands in successor with poison.
4278 for (PHINode &PN : To->phis())
4279 for (Use &U : PN.incoming_values())
4280 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4281 replaceUse(U, PoisonValue::get(PN.getType()));
4282 addToWorklist(&PN);
4283 MadeIRChange = true;
4284 }
4285
4286 Worklist.push_back(To);
4287}
4288
4289// Under the assumption that I is unreachable, remove it and following
4290// instructions. Changes are reported directly to MadeIRChange.
4291 void InstCombinerImpl::handleUnreachableFrom(
4292 Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
4293 BasicBlock *BB = I->getParent();
4294 for (Instruction &Inst : make_early_inc_range(
4295 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4296 std::next(I->getReverseIterator())))) {
4297 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4298 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4299 MadeIRChange = true;
4300 }
4301 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4302 continue;
4303 // RemoveDIs: erase debug-info on this instruction manually.
4304 Inst.dropDbgRecords();
4305 Inst.eraseFromParent();
4306 MadeIRChange = true;
4307 }
4308
4309 SmallVector<Value *> Changed;
4310 if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
4311 MadeIRChange = true;
4312 for (Value *V : Changed)
4313 addToWorklist(cast<Instruction>(V));
4314 }
4315
4316 // Handle potentially dead successors.
4317 for (BasicBlock *Succ : successors(BB))
4318 addDeadEdge(BB, Succ, Worklist);
4319}
4320
4321 void InstCombinerImpl::handlePotentiallyDeadBlocks(
4322 SmallVectorImpl<BasicBlock *> &Worklist) {
4323 while (!Worklist.empty()) {
4324 BasicBlock *BB = Worklist.pop_back_val();
4325 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4326 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4327 }))
4328 continue;
4329
4330 handleUnreachableFrom(&BB->front(), Worklist);
4331 }
4332}
4333
4334 void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4335 BasicBlock *LiveSucc) {
4336 SmallVector<BasicBlock *> Worklist;
4337 for (BasicBlock *Succ : successors(BB)) {
4338 // The live successor isn't dead.
4339 if (Succ == LiveSucc)
4340 continue;
4341
4342 addDeadEdge(BB, Succ, Worklist);
4343 }
4344
4346}
4347
4348 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4349 // Change br (not X), label True, label False to: br X, label False, True
4350 Value *Cond = BI.getCondition();
4351 Value *X;
4352 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4353 // Swap Destinations and condition...
4354 BI.swapSuccessors();
4355 if (BPI)
4356 BPI->swapSuccEdgesProbabilities(BI.getParent());
4357 return replaceOperand(BI, 0, X);
4358 }
4359
4360 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4361 // This is done by inverting the condition and swapping successors:
4362 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4363 Value *Y;
4364 if (isa<SelectInst>(Cond) &&
4365 match(Cond,
4366 m_OneUse(m_LogicalAnd(m_Value(X), m_OneUse(m_Not(m_Value(Y))))))) {
4367 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4368 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4369
4370 // Set weights for the new OR select instruction too.
4371 if (!ProfcheckDisableMetadataFixes) {
4372 if (auto *OrInst = dyn_cast<Instruction>(Or)) {
4373 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
4374 SmallVector<uint32_t> Weights;
4375 if (extractBranchWeights(*CondInst, Weights)) {
4376 assert(Weights.size() == 2 &&
4377 "Unexpected number of branch weights!");
4378 std::swap(Weights[0], Weights[1]);
4379 setBranchWeights(*OrInst, Weights, /*IsExpected=*/false);
4380 }
4381 }
4382 }
4383 }
4384 BI.swapSuccessors();
4385 if (BPI)
4386 BPI->swapSuccEdgesProbabilities(BI.getParent());
4387 return replaceOperand(BI, 0, Or);
4388 }
4389
4390 // If the condition is irrelevant, remove the use so that other
4391 // transforms on the condition become more effective.
4392 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4393 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4394
4395 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4396 CmpPredicate Pred;
4397 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4398 !isCanonicalPredicate(Pred)) {
4399 // Swap destinations and condition.
4400 auto *Cmp = cast<CmpInst>(Cond);
4401 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4402 BI.swapSuccessors();
4403 if (BPI)
4404 BPI->swapSuccEdgesProbabilities(BI.getParent());
4405 Worklist.push(Cmp);
4406 return &BI;
4407 }
4408
4409 if (isa<UndefValue>(Cond)) {
4410 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4411 return nullptr;
4412 }
4413 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4414 handlePotentiallyDeadSuccessors(BI.getParent(),
4415 BI.getSuccessor(!CI->getZExtValue()));
4416 return nullptr;
4417 }
4418
4419 // Replace all dominated uses of the condition with true/false
4420 // Ignore constant expressions to avoid iterating over uses on other
4421 // functions.
4422 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4423 for (auto &U : make_early_inc_range(Cond->uses())) {
4424 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4425 if (DT.dominates(Edge0, U)) {
4426 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4427 addToWorklist(cast<Instruction>(U.getUser()));
4428 continue;
4429 }
4430 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4431 if (DT.dominates(Edge1, U)) {
4432 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4433 addToWorklist(cast<Instruction>(U.getUser()));
4434 }
4435 }
4436 }
4437
4438 DC.registerBranch(&BI);
4439 return nullptr;
4440}
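// Editor's illustrative sketch (not part of the original source): the
// inverted-condition canonicalization above turns
//   %not = xor i1 %x, true
//   br i1 %not, label %t, label %f
// into
//   br i1 %x, label %f, label %t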
4441
4442// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4443// we can prove that both (switch C) and (switch X) go to the default when cond
4444// is false/true.
4447 bool IsTrueArm) {
4448 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4449 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4450 if (!C)
4451 return nullptr;
4452
4453 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4454 if (CstBB != SI.getDefaultDest())
4455 return nullptr;
4456 Value *X = Select->getOperand(3 - CstOpIdx);
4457 CmpPredicate Pred;
4458 const APInt *RHSC;
4459 if (!match(Select->getCondition(),
4460 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4461 return nullptr;
4462 if (IsTrueArm)
4463 Pred = ICmpInst::getInversePredicate(Pred);
4464
4465 // See whether we can replace the select with X
4466 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
4467 for (auto Case : SI.cases())
4468 if (!CR.contains(Case.getCaseValue()->getValue()))
4469 return nullptr;
4470
4471 return X;
4472}
4473
4474 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4475 Value *Cond = SI.getCondition();
4476 Value *Op0;
4477 const APInt *CondOpC;
4478 using InvertFn = std::function<APInt(const APInt &Case, const APInt &C)>;
4479
4480 auto MaybeInvertible = [&](Value *Cond) -> InvertFn {
4481 if (match(Cond, m_Add(m_Value(Op0), m_APInt(CondOpC))))
4482 // Change 'switch (X+C) case Case:' into 'switch (X) case Case-C'.
4483 return [](const APInt &Case, const APInt &C) { return Case - C; };
4484
4485 if (match(Cond, m_Sub(m_APInt(CondOpC), m_Value(Op0))))
4486 // Change 'switch (C-X) case Case:' into 'switch (X) case C-Case'.
4487 return [](const APInt &Case, const APInt &C) { return C - Case; };
4488
4489 if (match(Cond, m_Xor(m_Value(Op0), m_APInt(CondOpC))) &&
4490 !CondOpC->isMinSignedValue() && !CondOpC->isMaxSignedValue())
4491 // Change 'switch (X^C) case Case:' into 'switch (X) case Case^C'.
4492 // Prevent creation of large case values by excluding extremes.
4493 return [](const APInt &Case, const APInt &C) { return Case ^ C; };
4494
4495 return nullptr;
4496 };
4497
4498 // Attempt to invert and simplify the switch condition, as long as the
4499 // condition is not used further, as it may not be profitable otherwise.
4500 if (auto InvertFn = MaybeInvertible(Cond); InvertFn && Cond->hasOneUse()) {
4501 for (auto &Case : SI.cases()) {
4502 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4503 Case.setValue(ConstantInt::get(SI.getContext(), New));
4504 }
4505 return replaceOperand(SI, 0, Op0);
4506 }
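// Editor's illustrative sketch (not part of the original source): for a
// one-use condition, the add is folded into the case values, e.g.
//   %c = add i32 %x, 3
//   switch i32 %c, label %def [ i32 5, label %bb ]
// becomes
//   switch i32 %x, label %def [ i32 2, label %bb ]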
4507
4508 uint64_t ShiftAmt;
4509 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4510 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4511 all_of(SI.cases(), [&](const auto &Case) {
4512 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4513 })) {
4514 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4515 auto *Shl = cast<OverflowingBinaryOperator>(Cond);
4516 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4517 Shl->hasOneUse()) {
4518 Value *NewCond = Op0;
4519 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4520 // If the shift may wrap, we need to mask off the shifted bits.
4521 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4522 NewCond = Builder.CreateAnd(
4523 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4524 }
4525 for (auto Case : SI.cases()) {
4526 const APInt &CaseVal = Case.getCaseValue()->getValue();
4527 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4528 : CaseVal.lshr(ShiftAmt);
4529 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4530 }
4531 return replaceOperand(SI, 0, NewCond);
4532 }
4533 }
4534
4535 // Fold switch(zext/sext(X)) into switch(X) if possible.
4536 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4537 bool IsZExt = isa<ZExtInst>(Cond);
4538 Type *SrcTy = Op0->getType();
4539 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4540
4541 if (all_of(SI.cases(), [&](const auto &Case) {
4542 const APInt &CaseVal = Case.getCaseValue()->getValue();
4543 return IsZExt ? CaseVal.isIntN(NewWidth)
4544 : CaseVal.isSignedIntN(NewWidth);
4545 })) {
4546 for (auto &Case : SI.cases()) {
4547 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4548 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4549 }
4550 return replaceOperand(SI, 0, Op0);
4551 }
4552 }
4553
4554 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4555 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4556 if (Value *V =
4557 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4558 return replaceOperand(SI, 0, V);
4559 if (Value *V =
4560 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4561 return replaceOperand(SI, 0, V);
4562 }
4563
4564 KnownBits Known = computeKnownBits(Cond, &SI);
4565 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4566 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4567
4568 // Compute the number of leading bits we can ignore.
4569 // TODO: A better way to determine this would use ComputeNumSignBits().
4570 for (const auto &C : SI.cases()) {
4571 LeadingKnownZeros =
4572 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4573 LeadingKnownOnes =
4574 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4575 }
4576
4577 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4578
4579 // Shrink the condition operand if the new type is smaller than the old type.
4580 // But do not shrink to a non-standard type, because backend can't generate
4581 // good code for that yet.
4582 // TODO: We can make it aggressive again after fixing PR39569.
4583 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4584 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4585 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4586 Builder.SetInsertPoint(&SI);
4587 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4588
4589 for (auto Case : SI.cases()) {
4590 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4591 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4592 }
4593 return replaceOperand(SI, 0, NewCond);
4594 }
4595
4596 if (isa<UndefValue>(Cond)) {
4597 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4598 return nullptr;
4599 }
4600 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
4601 handlePotentiallyDeadSuccessors(SI.getParent(),
4602 SI.findCaseValue(CI)->getCaseSuccessor());
4603 return nullptr;
4604 }
4605
4606 return nullptr;
4607}
4608
4610InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
4611 auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4612 if (!WO)
4613 return nullptr;
4614
4615 Intrinsic::ID OvID = WO->getIntrinsicID();
4616 const APInt *C = nullptr;
4617 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4618 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4619 OvID == Intrinsic::umul_with_overflow)) {
4620 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4621 if (C->isAllOnes())
4622 return BinaryOperator::CreateNeg(WO->getLHS());
4623 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4624 if (C->isPowerOf2()) {
4625 return BinaryOperator::CreateShl(
4626 WO->getLHS(),
4627 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4628 }
4629 }
4630 }
4631
4632 // We're extracting from an overflow intrinsic. See if we're the only user.
4633 // That allows us to simplify multiple result intrinsics to simpler things
4634 // that just get one value.
4635 if (!WO->hasOneUse())
4636 return nullptr;
4637
4638 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4639 // and replace it with a traditional binary instruction.
4640 if (*EV.idx_begin() == 0) {
4641 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4642 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4643 // Replace the old instruction's uses with poison.
4644 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
4645 eraseInstFromFunction(*WO);
4646 return BinaryOperator::Create(BinOp, LHS, RHS);
4647 }
4648
4649 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4650
4651 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4652 if (OvID == Intrinsic::usub_with_overflow)
4653 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4654
4655 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4656 // +1 is not possible because we assume signed values.
4657 if (OvID == Intrinsic::smul_with_overflow &&
4658 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4659 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4660
4661 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4662 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4663 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4664 // Only handle even bitwidths for performance reasons.
4665 if (BitWidth % 2 == 0)
4666 return new ICmpInst(
4667 ICmpInst::ICMP_UGT, WO->getLHS(),
4668 ConstantInt::get(WO->getLHS()->getType(),
4669 APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4670 }
4671
4672 // If only the overflow result is used, and the right hand side is a
4673 // constant (or constant splat), we can remove the intrinsic by directly
4674 // checking for overflow.
4675 if (C) {
4676 // Compute the no-wrap range for LHS given RHS=C, then construct an
4677 // equivalent icmp, potentially using an offset.
4678 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4679 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4680
4681 CmpInst::Predicate Pred;
4682 APInt NewRHSC, Offset;
4683 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4684 auto *OpTy = WO->getRHS()->getType();
4685 auto *NewLHS = WO->getLHS();
4686 if (Offset != 0)
4687 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4688 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4689 ConstantInt::get(OpTy, NewRHSC));
4690 }
4691
4692 return nullptr;
4693}
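// Editor's illustrative sketch (not part of the original source): when only
// the overflow bit of a one-use intrinsic with a constant RHS is used,
//   %wo = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 10)
//   %ov = extractvalue { i8, i1 } %wo, 1
// the no-wrap region yields an equivalent plain compare:
//   %ov = icmp ugt i8 %x, -11   ; i.e. %x u> 245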
4694
4695 static Value *foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall,
4696 SelectInst *SelectInst,
4697 InstCombiner::BuilderTy &Builder) {
4698 // Helper to fold frexp of select to select of frexp.
4699
4700 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4701 return nullptr;
4702 Value *Cond = SelectInst->getCondition();
4703 Value *TrueVal = SelectInst->getTrueValue();
4704 Value *FalseVal = SelectInst->getFalseValue();
4705
4706 const APFloat *ConstVal = nullptr;
4707 Value *VarOp = nullptr;
4708 bool ConstIsTrue = false;
4709
4710 if (match(TrueVal, m_APFloat(ConstVal))) {
4711 VarOp = FalseVal;
4712 ConstIsTrue = true;
4713 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4714 VarOp = TrueVal;
4715 ConstIsTrue = false;
4716 } else {
4717 return nullptr;
4718 }
4719
4720 Builder.SetInsertPoint(&EV);
4721
4722 CallInst *NewFrexp =
4723 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4724 NewFrexp->copyIRFlags(FrexpCall);
4725
4726 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4727
4728 int Exp;
4729 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4730
4731 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4732
4733 Value *NewSel = Builder.CreateSelectFMF(
4734 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4735 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4736 return NewSel;
4737}
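// Editor's illustrative sketch (not part of the original source): with one
// constant arm, e.g.
//   %s = select i1 %c, double 8.0, double %v
//   %r = call { double, i32 } @llvm.frexp.f64.i32(double %s)
// the mantissa extract becomes frexp of %v plus a select between its
// mantissa and the constant's mantissa 0.5 (8.0 = 0.5 * 2^4).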
4738 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4739 Value *Agg = EV.getAggregateOperand();
4740
4741 if (!EV.hasIndices())
4742 return replaceInstUsesWith(EV, Agg);
4743
4744 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4745 SQ.getWithInstruction(&EV)))
4746 return replaceInstUsesWith(EV, V);
4747
4748 Value *Cond, *TrueVal, *FalseVal;
4749 if (match(Agg, m_OneUse(m_Intrinsic<Intrinsic::frexp>(m_Select(
4750 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4751 auto *SelInst =
4752 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4753 if (Value *Result =
4754 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4755 return replaceInstUsesWith(EV, Result);
4756 }
4757 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4758 // We're extracting from an insertvalue instruction, compare the indices
4759 const unsigned *exti, *exte, *insi, *inse;
4760 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4761 exte = EV.idx_end(), inse = IV->idx_end();
4762 exti != exte && insi != inse;
4763 ++exti, ++insi) {
4764 if (*insi != *exti)
4765 // The insert and extract both reference distinctly different elements.
4766 // This means the extract is not influenced by the insert, and we can
4767 // replace the aggregate operand of the extract with the aggregate
4768 // operand of the insert. i.e., replace
4769 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4770 // %E = extractvalue { i32, { i32 } } %I, 0
4771 // with
4772 // %E = extractvalue { i32, { i32 } } %A, 0
4773 return ExtractValueInst::Create(IV->getAggregateOperand(),
4774 EV.getIndices());
4775 }
4776 if (exti == exte && insi == inse)
4777 // Both iterators are at the end: Index lists are identical. Replace
4778 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4779 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4780 // with "i32 42"
4781 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4782 if (exti == exte) {
4783 // The extract list is a prefix of the insert list. i.e. replace
4784 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4785 // %E = extractvalue { i32, { i32 } } %I, 1
4786 // with
4787 // %X = extractvalue { i32, { i32 } } %A, 1
4788 // %E = insertvalue { i32 } %X, i32 42, 0
4789 // by switching the order of the insert and extract (though the
4790 // insertvalue should be left in, since it may have other uses).
4791 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4792 EV.getIndices());
4793 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4794 ArrayRef(insi, inse));
4795 }
4796 if (insi == inse)
4797 // The insert list is a prefix of the extract list
4798 // We can simply remove the common indices from the extract and make it
4799 // operate on the inserted value instead of the insertvalue result.
4800 // i.e., replace
4801 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4802 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4803 // with
4804 // %E = extractvalue { i32 } { i32 42 }, 0
4805 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4806 ArrayRef(exti, exte));
4807 }
4808
4809 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4810 return R;
4811
4812 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4813 // Bail out if the aggregate contains scalable vector type
4814 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4815 STy && STy->isScalableTy())
4816 return nullptr;
4817
4818 // If the (non-volatile) load only has one use, we can rewrite this to a
4819 // load from a GEP. This reduces the size of the load. If a load is used
4820 // only by extractvalue instructions then this either must have been
4821 // optimized before, or it is a struct with padding, in which case we
4822 // don't want to do the transformation as it loses padding knowledge.
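// Illustrative sketch (not part of the original source; types are
// hypothetical): a one-use, non-volatile load feeding an extractvalue
//   %agg = load { i32, i32 }, ptr %p
//   %v = extractvalue { i32, i32 } %agg, 1
// is rewritten below into a narrower load through a GEP:
//   %gep = getelementptr inbounds { i32, i32 }, ptr %p, i32 0, i32 1
//   %v = load i32, ptr %gep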
4823 if (L->isSimple() && L->hasOneUse()) {
4824 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4825 SmallVector<Value*, 4> Indices;
4826 // Prefix an i32 0 since we need the first element.
4827 Indices.push_back(Builder.getInt32(0));
4828 for (unsigned Idx : EV.indices())
4829 Indices.push_back(Builder.getInt32(Idx));
4830
4831 // We need to insert these at the location of the old load, not at that of
4832 // the extractvalue.
4833 Builder.SetInsertPoint(L);
4834 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4835 L->getPointerOperand(), Indices);
4836 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4837 // Whatever aliasing information we had for the original load must also
4838 // hold for the smaller load, so propagate the annotations.
4839 NL->setAAMetadata(L->getAAMetadata());
4840 // Returning the load directly will cause the main loop to insert it in
4841 // the wrong spot, so use replaceInstUsesWith().
4842 return replaceInstUsesWith(EV, NL);
4843 }
4844 }
4845
4846 if (auto *PN = dyn_cast<PHINode>(Agg))
4847 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4848 return Res;
4849
4850 // Canonicalize extract (select Cond, TV, FV)
4851 // -> select cond, (extract TV), (extract FV)
4852 if (auto *SI = dyn_cast<SelectInst>(Agg))
4853 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4854 return R;
4855
4856 // We could simplify extracts from other values. Note that nested extracts may
4857 // already be simplified implicitly by the above: extract (extract (insert) )
4858 // will be translated into extract ( insert ( extract ) ) first and then just
4859 // the value inserted, if appropriate. Similarly for extracts from single-use
4860 // loads: extract (extract (load)) will be translated to extract (load (gep))
4861 // and if again single-use then via load (gep (gep)) to load (gep).
4862 // However, double extracts from e.g. function arguments or return values
4863 // aren't handled yet.
4864 return nullptr;
4865}
4866
4867/// Return 'true' if the given typeinfo will match anything.
4868static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4869 switch (Personality) {
4870 case EHPersonality::GNU_C:
4871 case EHPersonality::GNU_C_SjLj:
4872 case EHPersonality::Rust:
4873 // The GCC C EH and Rust personality only exists to support cleanups, so
4874 // it's not clear what the semantics of catch clauses are.
4875 return false;
4876 case EHPersonality::Unknown:
4877 return false;
4878 case EHPersonality::GNU_Ada:
4879 // While __gnat_all_others_value will match any Ada exception, it doesn't
4880 // match foreign exceptions (or didn't, before gcc-4.7).
4881 return false;
4882 case EHPersonality::GNU_CXX:
4883 case EHPersonality::GNU_CXX_SjLj:
4884 case EHPersonality::GNU_ObjC:
4885 case EHPersonality::MSVC_X86SEH:
4886 case EHPersonality::MSVC_TableSEH:
4887 case EHPersonality::MSVC_CXX:
4888 case EHPersonality::CoreCLR:
4889 case EHPersonality::Wasm_CXX:
4890 case EHPersonality::XL_CXX:
4891 case EHPersonality::ZOS_CXX:
4892 return TypeInfo->isNullValue();
4893 }
4894 llvm_unreachable("invalid enum");
4895}
4896
4897static bool shorter_filter(const Value *LHS, const Value *RHS) {
4898 return
4899 cast<ArrayType>(LHS->getType())->getNumElements()
4900 <
4901 cast<ArrayType>(RHS->getType())->getNumElements();
4902}
4903
4904 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
4905 // The logic here should be correct for any real-world personality function.
4906 // However if that turns out not to be true, the offending logic can always
4907 // be conditioned on the personality function, like the catch-all logic is.
4908 EHPersonality Personality =
4909 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4910
4911 // Simplify the list of clauses, e.g. by removing repeated catch clauses
4912 // (these are often created by inlining).
4913 bool MakeNewInstruction = false; // If true, recreate using the following:
4914 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4915 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4916
4917 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4918 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4919 bool isLastClause = i + 1 == e;
4920 if (LI.isCatch(i)) {
4921 // A catch clause.
4922 Constant *CatchClause = LI.getClause(i);
4923 Constant *TypeInfo = CatchClause->stripPointerCasts();
4924
4925 // If we already saw this clause, there is no point in having a second
4926 // copy of it.
4927 if (AlreadyCaught.insert(TypeInfo).second) {
4928 // This catch clause was not already seen.
4929 NewClauses.push_back(CatchClause);
4930 } else {
4931 // Repeated catch clause - drop the redundant copy.
4932 MakeNewInstruction = true;
4933 }
4934
4935 // If this is a catch-all then there is no point in keeping any following
4936 // clauses or marking the landingpad as having a cleanup.
4937 if (isCatchAll(Personality, TypeInfo)) {
4938 if (!isLastClause)
4939 MakeNewInstruction = true;
4940 CleanupFlag = false;
4941 break;
4942 }
4943 } else {
4944 // A filter clause. If any of the filter elements were already caught
4945 // then they can be dropped from the filter. It is tempting to try to
4946 // exploit the filter further by saying that any typeinfo that does not
4947 // occur in the filter can't be caught later (and thus can be dropped).
4948 // However this would be wrong, since typeinfos can match without being
4949 // equal (for example if one represents a C++ class, and the other some
4950 // class derived from it).
4951 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4952 Constant *FilterClause = LI.getClause(i);
4953 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4954 unsigned NumTypeInfos = FilterType->getNumElements();
4955
4956 // An empty filter catches everything, so there is no point in keeping any
4957 // following clauses or marking the landingpad as having a cleanup. By
4958 // dealing with this case here the following code is made a bit simpler.
4959 if (!NumTypeInfos) {
4960 NewClauses.push_back(FilterClause);
4961 if (!isLastClause)
4962 MakeNewInstruction = true;
4963 CleanupFlag = false;
4964 break;
4965 }
4966
4967 bool MakeNewFilter = false; // If true, make a new filter.
4968 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4969 if (isa<ConstantAggregateZero>(FilterClause)) {
4970 // Not an empty filter - it contains at least one null typeinfo.
4971 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4972 Constant *TypeInfo =
4973 Constant::getNullValue(FilterType->getElementType());
4974 // If this typeinfo is a catch-all then the filter can never match.
4975 if (isCatchAll(Personality, TypeInfo)) {
4976 // Throw the filter away.
4977 MakeNewInstruction = true;
4978 continue;
4979 }
4980
4981 // There is no point in having multiple copies of this typeinfo, so
4982 // discard all but the first copy if there is more than one.
4983 NewFilterElts.push_back(TypeInfo);
4984 if (NumTypeInfos > 1)
4985 MakeNewFilter = true;
4986 } else {
4987 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4988 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4989 NewFilterElts.reserve(NumTypeInfos);
4990
4991 // Remove any filter elements that were already caught or that already
4992 // occurred in the filter. While there, see if any of the elements are
4993 // catch-alls. If so, the filter can be discarded.
4994 bool SawCatchAll = false;
4995 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4996 Constant *Elt = Filter->getOperand(j);
4997 Constant *TypeInfo = Elt->stripPointerCasts();
4998 if (isCatchAll(Personality, TypeInfo)) {
4999 // This element is a catch-all. Bail out, noting this fact.
5000 SawCatchAll = true;
5001 break;
5002 }
5003
5004 // Even if we've seen a type in a catch clause, we don't want to
5005 // remove it from the filter. An unexpected type handler may be
5006 // set up for a call site which throws an exception of the same
5007 // type caught. In order for the exception thrown by the unexpected
5008 // handler to propagate correctly, the filter must be correctly
5009 // described for the call site.
5010 //
5011 // Example:
5012 //
5013 // void unexpected() { throw 1;}
5014 // void foo() throw (int) {
5015 // std::set_unexpected(unexpected);
5016 // try {
5017 // throw 2.0;
5018 // } catch (int i) {}
5019 // }
5020
5021 // There is no point in having multiple copies of the same typeinfo in
5022 // a filter, so only add it if we didn't already.
5023 if (SeenInFilter.insert(TypeInfo).second)
5024 NewFilterElts.push_back(cast<Constant>(Elt));
5025 }
5026 // A filter containing a catch-all cannot match anything by definition.
5027 if (SawCatchAll) {
5028 // Throw the filter away.
5029 MakeNewInstruction = true;
5030 continue;
5031 }
5032
5033 // If we dropped something from the filter, make a new one.
5034 if (NewFilterElts.size() < NumTypeInfos)
5035 MakeNewFilter = true;
5036 }
5037 if (MakeNewFilter) {
5038 FilterType = ArrayType::get(FilterType->getElementType(),
5039 NewFilterElts.size());
5040 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
5041 MakeNewInstruction = true;
5042 }
5043
5044 NewClauses.push_back(FilterClause);
5045
5046 // If the new filter is empty then it will catch everything so there is
5047 // no point in keeping any following clauses or marking the landingpad
5048 // as having a cleanup. The case of the original filter being empty was
5049 // already handled above.
5050 if (MakeNewFilter && !NewFilterElts.size()) {
5051 assert(MakeNewInstruction && "New filter but not a new instruction!");
5052 CleanupFlag = false;
5053 break;
5054 }
5055 }
5056 }
5057
5058 // If several filters occur in a row then reorder them so that the shortest
5059 // filters come first (those with the smallest number of elements). This is
5060 // advantageous because shorter filters are more likely to match, speeding up
5061 // unwinding, but mostly because it increases the effectiveness of the other
5062 // filter optimizations below.
5063 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
5064 unsigned j;
5065 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
5066 for (j = i; j != e; ++j)
5067 if (!isa<ArrayType>(NewClauses[j]->getType()))
5068 break;
5069
5070 // Check whether the filters are already sorted by length. We need to know
5071 // if sorting them is actually going to do anything so that we only make a
5072 // new landingpad instruction if it does.
5073 for (unsigned k = i; k + 1 < j; ++k)
5074 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
5075 // Not sorted, so sort the filters now. Doing an unstable sort would be
5076 // correct too but reordering filters pointlessly might confuse users.
5077 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
5078 shorter_filter);
5079 MakeNewInstruction = true;
5080 break;
5081 }
5082
5083 // Look for the next batch of filters.
5084 i = j + 1;
5085 }
5086
5087 // If typeinfos matched if and only if equal, then the elements of a filter L
5088 // that occurs later than a filter F could be replaced by the intersection of
5089 // the elements of F and L. In reality two typeinfos can match without being
5090 // equal (for example if one represents a C++ class, and the other some class
5091 // derived from it) so it would be wrong to perform this transform in general.
5092 // However the transform is correct and useful if F is a subset of L. In that
5093 // case L can be replaced by F, and thus removed altogether since repeating a
5094 // filter is pointless. So here we look at all pairs of filters F and L where
5095 // L follows F in the list of clauses, and remove L if every element of F is
5096 // an element of L. This can occur when inlining C++ functions with exception
5097 // specifications.
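// Illustrative sketch (not part of the original source; typeinfos are
// hypothetical): given the clause list
//   filter [1 x ptr] [ptr @typeid.A]                 ; F
//   filter [2 x ptr] [ptr @typeid.A, ptr @typeid.B]  ; L
// every element of F is also an element of L, so L is erased below.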
5098 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
5099 // Examine each filter in turn.
5100 Value *Filter = NewClauses[i];
5101 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
5102 if (!FTy)
5103 // Not a filter - skip it.
5104 continue;
5105 unsigned FElts = FTy->getNumElements();
5106 // Examine each filter following this one. Doing this backwards means that
5107 // we don't have to worry about filters disappearing under us when removed.
5108 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
5109 Value *LFilter = NewClauses[j];
5110 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
5111 if (!LTy)
5112 // Not a filter - skip it.
5113 continue;
5114 // If Filter is a subset of LFilter, i.e. every element of Filter is also
5115 // an element of LFilter, then discard LFilter.
5116 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
5117 // If Filter is empty then it is a subset of LFilter.
5118 if (!FElts) {
5119 // Discard LFilter.
5120 NewClauses.erase(J);
5121 MakeNewInstruction = true;
5122 // Move on to the next filter.
5123 continue;
5124 }
5125 unsigned LElts = LTy->getNumElements();
5126 // If Filter is longer than LFilter then it cannot be a subset of it.
5127 if (FElts > LElts)
5128 // Move on to the next filter.
5129 continue;
5130 // At this point we know that LFilter has at least one element.
5131 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
5132 // Filter is a subset of LFilter iff Filter contains only zeros (as we
5133 // already know that Filter is not longer than LFilter).
5134 if (isa<ConstantAggregateZero>(Filter)) {
5135 assert(FElts <= LElts && "Should have handled this case earlier!");
5136 // Discard LFilter.
5137 NewClauses.erase(J);
5138 MakeNewInstruction = true;
5139 }
5140 // Move on to the next filter.
5141 continue;
5142 }
5143 ConstantArray *LArray = cast<ConstantArray>(LFilter);
5144 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
5145 // Since Filter is non-empty and contains only zeros, it is a subset of
5146 // LFilter iff LFilter contains a zero.
5147 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
5148 for (unsigned l = 0; l != LElts; ++l)
5149 if (LArray->getOperand(l)->isNullValue()) {
5150 // LFilter contains a zero - discard it.
5151 NewClauses.erase(J);
5152 MakeNewInstruction = true;
5153 break;
5154 }
5155 // Move on to the next filter.
5156 continue;
5157 }
5158 // At this point we know that both filters are ConstantArrays. Loop over
5159 // operands to see whether every element of Filter is also an element of
5160 // LFilter. Since filters tend to be short this is probably faster than
5161 // using a method that scales nicely.
5162 ConstantArray *FArray = cast<ConstantArray>(Filter);
5163 bool AllFound = true;
5164 for (unsigned f = 0; f != FElts; ++f) {
5165 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
5166 AllFound = false;
5167 for (unsigned l = 0; l != LElts; ++l) {
5168 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
5169 if (LTypeInfo == FTypeInfo) {
5170 AllFound = true;
5171 break;
5172 }
5173 }
5174 if (!AllFound)
5175 break;
5176 }
5177 if (AllFound) {
5178 // Discard LFilter.
5179 NewClauses.erase(J);
5180 MakeNewInstruction = true;
5181 }
5182 // Move on to the next filter.
5183 }
5184 }
5185
5186 // If we changed any of the clauses, replace the old landingpad instruction
5187 // with a new one.
5188 if (MakeNewInstruction) {
5189 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
5190 NewClauses.size());
5191 for (Constant *C : NewClauses)
5192 NLI->addClause(C);
5193 // A landing pad with no clauses must have the cleanup flag set. It is
5194 // theoretically possible, though highly unlikely, that we eliminated all
5195 // clauses. If so, force the cleanup flag to true.
5196 if (NewClauses.empty())
5197 CleanupFlag = true;
5198 NLI->setCleanup(CleanupFlag);
5199 return NLI;
5200 }
5201
5202 // Even if none of the clauses changed, we may nonetheless have understood
5203 // that the cleanup flag is pointless. Clear it if so.
5204 if (LI.isCleanup() != CleanupFlag) {
5205 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5206 LI.setCleanup(CleanupFlag);
5207 return &LI;
5208 }
5209
5210 return nullptr;
5211}
5212
5213Value *
5214 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
5215 // Try to push freeze through instructions that propagate but don't produce
5216 // poison as far as possible. If an operand of freeze follows three
5217 // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
5218 // guaranteed-non-poison operands then push the freeze through to the one
5219 // operand that is not guaranteed non-poison. The actual transform is as
5220 // follows.
5221 // Op1 = ... ; Op1 can be poison
5222 // Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and all its
5223 // ; other operands are guaranteed non-poison
5224 // ... = Freeze(Op0)
5225 // =>
5226 // Op1 = ...
5227 // Op1.fr = Freeze(Op1)
5228 // ... = Inst(Op1.fr, NonPoisonOps...)
5229 auto *OrigOp = OrigFI.getOperand(0);
5230 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
5231
5232 // While we could change the other users of OrigOp to use freeze(OrigOp), that
5233 // potentially reduces their optimization potential, so let's only do this iff
5234 // the OrigOp is only used by the freeze.
5235 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
5236 return nullptr;
5237
5238 // We can't push the freeze through an instruction which can itself create
5239 // poison. If the only source of new poison is flags, we can simply
5240 // strip them (since we know the only use is the freeze and nothing can
5241 // benefit from them.)
5242 if (canCreateUndefOrPoison(cast<Operator>(OrigOpInst),
5243 /*ConsiderFlagsAndMetadata*/ false))
5244 return nullptr;
5245
5246 // If operand is guaranteed not to be poison, there is no need to add freeze
5247 // to the operand. So we first find the operand that is not guaranteed to be
5248 // poison.
5249 Value *MaybePoisonOperand = nullptr;
5250 for (Value *V : OrigOpInst->operands()) {
5251 if (isGuaranteedNotToBeUndefOrPoison(V) ||
5252 // Treat identical operands as a single operand.
5253 (MaybePoisonOperand && MaybePoisonOperand == V))
5254 continue;
5255 if (!MaybePoisonOperand)
5256 MaybePoisonOperand = V;
5257 else
5258 return nullptr;
5259 }
5260
5261 OrigOpInst->dropPoisonGeneratingAnnotations();
5262
5263 // If all operands are guaranteed to be non-poison, we can drop freeze.
5264 if (!MaybePoisonOperand)
5265 return OrigOp;
5266
5267 Builder.SetInsertPoint(OrigOpInst);
5268 Value *FrozenMaybePoisonOperand = Builder.CreateFreeze(
5269 MaybePoisonOperand, MaybePoisonOperand->getName() + ".fr");
5270
5271 OrigOpInst->replaceUsesOfWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
5272 return OrigOp;
5273}
5274
5275 Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5276 PHINode *PN) {
5277 // Detect whether this is a recurrence with a start value and some number of
5278 // backedge values. We'll check whether we can push the freeze through the
5279 // backedge values (possibly dropping poison flags along the way) until we
5280 // reach the phi again. In that case, we can move the freeze to the start
5281 // value.
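// Illustrative sketch (not part of the original source; names are
// hypothetical):
//   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.fr = freeze i32 %iv
// becomes
//   %start.fr = freeze i32 %start   ; inserted at the end of %entry
//   %iv = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
// with uses of %iv.fr replaced by %iv and poison-generating flags
// dropped from the backedge computation of %iv.next.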
5282 Use *StartU = nullptr;
5283 SmallVector<Value *> Worklist;
5284 for (Use &U : PN->incoming_values()) {
5285 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5286 // Add backedge value to worklist.
5287 Worklist.push_back(U.get());
5288 continue;
5289 }
5290
5291 // Don't bother handling multiple start values.
5292 if (StartU)
5293 return nullptr;
5294 StartU = &U;
5295 }
5296
5297 if (!StartU || Worklist.empty())
5298 return nullptr; // Not a recurrence.
5299
5300 Value *StartV = StartU->get();
5301 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5302 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5303 // We can't insert freeze if the start value is the result of the
5304 // terminator (e.g. an invoke).
5305 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5306 return nullptr;
5307 SmallPtrSet<Value *, 32> Visited;
5308 SmallVector<Instruction *> DropFlags;
5307
5310 while (!Worklist.empty()) {
5311 Value *V = Worklist.pop_back_val();
5312 if (!Visited.insert(V).second)
5313 continue;
5314
5315 if (Visited.size() > 32)
5316 return nullptr; // Limit the total number of values we inspect.
5317
5318 // Assume that PN is non-poison, because it will be after the transform.
5319 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5320 continue;
5321
5322 Instruction *I = dyn_cast<Instruction>(V);
5323 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5324 /*ConsiderFlagsAndMetadata*/ false))
5325 return nullptr;
5326
5327 DropFlags.push_back(I);
5328 append_range(Worklist, I->operands());
5329 }
5330
5331 for (Instruction *I : DropFlags)
5332 I->dropPoisonGeneratingAnnotations();
5333
5334 if (StartNeedsFreeze) {
5335 Builder.SetInsertPoint(StartBB->getTerminator());
5336 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5337 StartV->getName() + ".fr");
5338 replaceUse(*StartU, FrozenStartV);
5339 }
5340 return replaceInstUsesWith(FI, PN);
5341}
5342
5343 bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5344 Value *Op = FI.getOperand(0);
5345
5346 if (isa<Constant>(Op) || Op->hasOneUse())
5347 return false;
5348
5349 // Move the freeze directly after the definition of its operand, so that
5350 // it dominates the maximum number of uses. Note that it may not dominate
5351 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5352 // the normal/default destination. This is why the domination check in the
5353 // replacement below is still necessary.
5354 BasicBlock::iterator MoveBefore;
5355 if (isa<Argument>(Op)) {
5356 MoveBefore =
5357 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5358 } else {
5359 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5360 if (!MoveBeforeOpt)
5361 return false;
5362 MoveBefore = *MoveBeforeOpt;
5363 }
5364
5365 // Re-point iterator to come after any debug-info records.
5366 MoveBefore.setHeadBit(false);
5367
5368 bool Changed = false;
5369 if (&FI != &*MoveBefore) {
5370 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5371 Changed = true;
5372 }
5373
5374 SmallVector<User *> Users;
5375 Changed |= Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5376 if (!DT.dominates(&FI, U))
5377 return false;
5378
5379 Users.push_back(U.getUser());
5380 return true;
5381 });
5382
5383 for (auto *U : Users) {
5384 for (auto &AssumeVH : AC.assumptionsFor(U)) {
5385 if (!AssumeVH)
5386 continue;
5387 AC.updateAffectedValues(cast<AssumeInst>(AssumeVH));
5388 }
5389 }
5390
5391 return Changed;
5392}
5393
5394// Check if any direct or bitcast user of this value is a shuffle instruction.
5395 static bool isUsedWithinShuffleVector(Value *V) {
5396 for (auto *U : V->users()) {
5397 if (isa<ShuffleVectorInst>(U))
5398 return true;
5399 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5400 return true;
5401 }
5402 return false;
5403}
5404
5405 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5406 Value *Op0 = I.getOperand(0);
5407
5408 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5409 return replaceInstUsesWith(I, V);
5410
5411 // freeze (phi const, x) --> phi const, (freeze x)
5412 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5413 if (Instruction *NV = foldOpIntoPhi(I, PN))
5414 return NV;
5415 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5416 return NV;
5417 }
5418
5419 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5420 return replaceInstUsesWith(I, NI);
5421
5422 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5423 // - or: pick -1
5424 // - select's condition: if the true value is constant, choose it by making
5425 // the condition true.
5426 // - phi: pick the common constant across operands
5427 // - default: pick 0
5428 //
5429 // Note that this transform is intentionally done here rather than
5430 // via an analysis in InstSimplify or at individual user sites. That is
5431 // because we must produce the same value for all uses of the freeze -
5432 // it's the reason "freeze" exists!
5433 //
5434 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5435 // duplicating logic for binops at least.
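// Illustrative sketch (not part of the original source): for
//   %f = freeze i32 undef
//   %o = or i32 %f, %x
// the lambda below picks -1, so the 'or' later folds to -1. If %f had
// another user wanting a different constant, the choices would conflict
// and the null value (0) would be used for all uses instead.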
5436 auto getUndefReplacement = [&](Type *Ty) {
5437 auto pickCommonConstantFromPHI = [](PHINode &PN) -> Value * {
5438 // phi(freeze(undef), C, C). Choose C for freeze so the PHI can be
5439 // removed.
5440 Constant *BestValue = nullptr;
5441 for (Value *V : PN.incoming_values()) {
5442 if (match(V, m_Freeze(m_Undef())))
5443 continue;
5444
5445 Constant *C = dyn_cast<Constant>(V);
5446 if (!C)
5447 return nullptr;
5448
5449 if (!isGuaranteedNotToBeUndefOrPoison(C))
5450 return nullptr;
5451
5452 if (BestValue && BestValue != C)
5453 return nullptr;
5454
5455 BestValue = C;
5456 }
5457 return BestValue;
5458 };
5459
5460 Value *NullValue = Constant::getNullValue(Ty);
5461 Value *BestValue = nullptr;
5462 for (auto *U : I.users()) {
5463 Value *V = NullValue;
5464 if (match(U, m_Or(m_Value(), m_Value())))
5465 V = Constant::getAllOnesValue(Ty);
5466 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5467 V = ConstantInt::getTrue(Ty);
5468 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5469 if (V == &I || !isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5470 V = NullValue;
5471 } else if (auto *PHI = dyn_cast<PHINode>(U)) {
5472 if (Value *MaybeV = pickCommonConstantFromPHI(*PHI))
5473 V = MaybeV;
5474 }
5475
5476 if (!BestValue)
5477 BestValue = V;
5478 else if (BestValue != V)
5479 BestValue = NullValue;
5480 }
5481 assert(BestValue && "Must have at least one use");
5482 assert(BestValue != &I && "Cannot replace with itself");
5483 return BestValue;
5484 };
5485
5486 if (match(Op0, m_Undef())) {
5487 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5488 // a shuffle. This may improve codegen for shuffles that allow
5489 // unspecified inputs.
5490 if (isUsedWithinShuffleVector(&I))
5491 return nullptr;
5492 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5493 }
5494
5495 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5496 Type *Ty = C->getType();
5497 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5498 if (!VTy)
5499 return nullptr;
5500 unsigned NumElts = VTy->getNumElements();
5501 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5502 for (unsigned i = 0; i != NumElts; ++i) {
5503 Constant *EltC = C->getAggregateElement(i);
5504 if (EltC && !match(EltC, m_Undef())) {
5505 BestValue = EltC;
5506 break;
5507 }
5508 }
5509 return Constant::replaceUndefsWith(C, BestValue);
5510 };
5511
5512 Constant *C;
5513 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5514 !C->containsConstantExpression()) {
5515 if (Constant *Repl = getFreezeVectorReplacement(C))
5516 return replaceInstUsesWith(I, Repl);
5517 }
5518
5519 // Replace uses of Op with freeze(Op).
5520 if (freezeOtherUses(I))
5521 return &I;
5522
5523 return nullptr;
5524}
5525
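// Illustrative sketch (not part of the original source; 'getValue' is a
// hypothetical callee) of the idiom the helper below recognizes:
//   int unused;
//   getValue(&unused); // call writes only to the otherwise-dead 'unused'
// Here the alloca backing 'unused' has no users besides the call itself.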
5526/// Check for case where the call writes to an otherwise dead alloca. This
5527/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5528/// helper *only* analyzes the write; doesn't check any other legality aspect.
5529 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5530 auto *CB = dyn_cast<CallBase>(I);
5531 if (!CB)
5532 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5533 // to allow reload along used path as described below. Otherwise, this
5534 // is simply a store to a dead allocation which will be removed.
5535 return false;
5536 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5537 if (!Dest)
5538 return false;
5539 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5540 if (!AI)
5541 // TODO: allow malloc?
5542 return false;
5543 // TODO: allow memory access dominated by move point? Note that since AI
5544 // could have a reference to itself captured by the call, we would need to
5545 // account for cycles in doing so.
5546 SmallVector<const User *> AllocaUsers;
5547 SmallPtrSet<const User *, 4> Visited;
5548 auto pushUsers = [&](const Instruction &I) {
5549 for (const User *U : I.users()) {
5550 if (Visited.insert(U).second)
5551 AllocaUsers.push_back(U);
5552 }
5553 };
5554 pushUsers(*AI);
5555 while (!AllocaUsers.empty()) {
5556 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5557 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5558 pushUsers(*UserI);
5559 continue;
5560 }
5561 if (UserI == CB)
5562 continue;
5563 // TODO: support lifetime.start/end here
5564 return false;
5565 }
5566 return true;
5567}
5568
5569/// Try to move the specified instruction from its current block into the
5570/// beginning of DestBlock, which can only happen if it's safe to move the
5571/// instruction past all of the instructions between it and the end of its
5572/// block.
5573 bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5574 BasicBlock *DestBlock) {
5575 BasicBlock *SrcBlock = I->getParent();
5576
5577 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5578 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5579 I->isTerminator())
5580 return false;
5581
5582 // Do not sink static or dynamic alloca instructions. Static allocas must
5583 // remain in the entry block, and dynamic allocas must not be sunk in between
5584 // a stacksave / stackrestore pair, which would incorrectly shorten its
5585 // lifetime.
5586 if (isa<AllocaInst>(I))
5587 return false;
5588
5589 // Do not sink into catchswitch blocks.
5590 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5591 return false;
5592
5593 // Do not sink convergent call instructions.
5594 if (auto *CI = dyn_cast<CallInst>(I)) {
5595 if (CI->isConvergent())
5596 return false;
5597 }
5598
5599 // Unless we can prove that the memory write isn't visible except on the
5600 // path we're sinking to, we must bail.
5601 if (I->mayWriteToMemory()) {
5602 if (!SoleWriteToDeadLocal(I, TLI))
5603 return false;
5604 }
5605
5606 // We can only sink load instructions if there is nothing between the load and
5607 // the end of block that could change the value.
5608 if (I->mayReadFromMemory() &&
5609 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5610 // We don't want to do any sophisticated alias analysis, so we only check
5611 // the instructions after I in I's parent block if we try to sink to its
5612 // successor block.
5613 if (DestBlock->getUniquePredecessor() != I->getParent())
5614 return false;
5615 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5616 E = I->getParent()->end();
5617 Scan != E; ++Scan)
5618 if (Scan->mayWriteToMemory())
5619 return false;
5620 }
5621
5622 I->dropDroppableUses([&](const Use *U) {
5623 auto *I = dyn_cast<Instruction>(U->getUser());
5624 if (I && I->getParent() != DestBlock) {
5625 Worklist.add(I);
5626 return true;
5627 }
5628 return false;
5629 });
5630 /// FIXME: We could remove droppable uses that are not dominated by
5631 /// the new position.
5632
5633 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5634 I->moveBefore(*DestBlock, InsertPos);
5635 ++NumSunkInst;
5636
5637 // Also sink all related debug uses from the source basic block. Otherwise we
5638 // get debug use before the def. Attempt to salvage debug uses first, to
5639 // maximise the range variables have location for. If we cannot salvage, then
5640 // mark the location undef: we know it was supposed to receive a new location
5641 // here, but that computation has been sunk.
5642 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5643 findDbgUsers(I, DbgVariableRecords);
5644 if (!DbgVariableRecords.empty())
5645 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5646 DbgVariableRecords);
5647
5648 // PS: there are numerous flaws with this behaviour, not least that right now
5649 // assignments can be re-ordered past other assignments to the same variable
5650 // if they use different Values. Creating more undef assignments can never be
5651 // undone. And salvaging all users outside of this block can unnecessarily
5652 // alter the lifetime of the live-value that the variable refers to.
5653 // Some of these things can be resolved by tolerating debug use-before-defs in
5654 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5655 // being used for more architectures.
5656
5657 return true;
5658}
5659
5660 void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5661 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5662 BasicBlock *DestBlock,
5663 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5664 // For all debug values in the destination block, the sunk instruction
5665 // will still be available, so they do not need to be dropped.
5666
5667 // Fetch all DbgVariableRecords not already in the destination.
5668 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5669 for (auto &DVR : DbgVariableRecords)
5670 if (DVR->getParent() != DestBlock)
5671 DbgVariableRecordsToSalvage.push_back(DVR);
5672
5673 // Fetch a second collection, of DbgVariableRecords in the source block that
5674 // we're going to sink.
5675 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5676 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5677 if (DVR->getParent() == SrcBlock)
5678 DbgVariableRecordsToSink.push_back(DVR);
5679
5680 // Sort DbgVariableRecords according to their position in the block. This is a
5681 // partial order: DbgVariableRecords attached to different instructions will
5682 // be ordered by the instruction order, but DbgVariableRecords attached to the
5683 // same instruction won't have an order.
5684 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5685 return B->getInstruction()->comesBefore(A->getInstruction());
5686 };
5687 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5688
5689 // If there are two assignments to the same variable attached to the same
5690 // instruction, the ordering between the two assignments is important. Scan
5691 // for this (rare) case and establish which is the last assignment.
5692 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5693 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5694 if (DbgVariableRecordsToSink.size() > 1) {
5695 SmallDenseMap<InstVarPair, unsigned> CountMap;
5696 // Count how many assignments to each variable there is per instruction.
5697 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5698 DebugVariable DbgUserVariable =
5699 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5700 DVR->getDebugLoc()->getInlinedAt());
5701 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5702 }
5703
5704 // If there are any instructions with two assignments, add them to the
5705 // FilterOutMap to record that they need extra filtering.
5706 SmallPtrSet<const Instruction *, 4> DupSet;
5707 for (auto It : CountMap) {
5708 if (It.second > 1) {
5709 FilterOutMap[It.first] = nullptr;
5710 DupSet.insert(It.first.first);
5711 }
5712 }
5713
5714 // For all instruction/variable pairs needing extra filtering, find the
5715 // latest assignment.
5716 for (const Instruction *Inst : DupSet) {
5717 for (DbgVariableRecord &DVR :
5718 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5719 DebugVariable DbgUserVariable =
5720 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5721 DVR.getDebugLoc()->getInlinedAt());
5722 auto FilterIt =
5723 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5724 if (FilterIt == FilterOutMap.end())
5725 continue;
5726 if (FilterIt->second != nullptr)
5727 continue;
5728 FilterIt->second = &DVR;
5729 }
5730 }
5731 }
5732
5733 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5734 // out any duplicate assignments identified above.
5735 SmallVector<DbgVariableRecord *, 2> DVRClones;
5736 SmallSet<DebugVariable, 4> SunkVariables;
5737 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5738 if (DVR->Type == DbgVariableRecord::LocationType::Declare)
5739 continue;
5740
5741 DebugVariable DbgUserVariable =
5742 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5743 DVR->getDebugLoc()->getInlinedAt());
5744
5745 // For any variable where there were multiple assignments in the same place,
5746 // ignore all but the last assignment.
5747 if (!FilterOutMap.empty()) {
5748 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5749 auto It = FilterOutMap.find(IVP);
5750
5751 // Filter out.
5752 if (It != FilterOutMap.end() && It->second != DVR)
5753 continue;
5754 }
5755
5756 if (!SunkVariables.insert(DbgUserVariable).second)
5757 continue;
5758
5759 if (DVR->isDbgAssign())
5760 continue;
5761
5762 DVRClones.emplace_back(DVR->clone());
5763 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5764 }
5765
5766 // Perform salvaging without the clones, then sink the clones.
5767 if (DVRClones.empty())
5768 return;
5769
5770 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5771
5772 // The clones are in reverse order of original appearance. Assert that the
5773 // head bit is set on the iterator as we _should_ have received it via
5774 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5775 // we'll repeatedly insert at the head, such as:
5776 // DVR-3 (third insertion goes here)
5777 // DVR-2 (second insertion goes here)
5778 // DVR-1 (first insertion goes here)
5779 // Any-Prior-DVRs
5780 // InsertPtInst
5781 assert(InsertPos.getHeadBit());
5782 for (DbgVariableRecord *DVRClone : DVRClones) {
5783 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5784 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5785 }
5786}
5787
5788 bool InstCombinerImpl::run() {
5789 while (!Worklist.isEmpty()) {
5790 // Walk deferred instructions in reverse order, and push them to the
5791 // worklist, which means they'll end up popped from the worklist in-order.
5792 while (Instruction *I = Worklist.popDeferred()) {
5793 // Check to see if we can DCE the instruction. We do this already here to
5794 // reduce the number of uses and thus allow other folds to trigger.
5795 // Note that eraseInstFromFunction() may push additional instructions on
5796 // the deferred worklist, so this will DCE whole instruction chains.
5797 if (isInstructionTriviallyDead(I, &TLI)) {
5798 eraseInstFromFunction(*I);
5799 ++NumDeadInst;
5800 continue;
5801 }
5802
5803 Worklist.push(I);
5804 }
5805
5806 Instruction *I = Worklist.removeOne();
5807 if (I == nullptr) continue; // skip null values.
5808
5809 // Check to see if we can DCE the instruction.
5810 if (isInstructionTriviallyDead(I, &TLI)) {
5811 eraseInstFromFunction(*I);
5812 ++NumDeadInst;
5813 continue;
5814 }
5815
5816 if (!DebugCounter::shouldExecute(VisitCounter))
5817 continue;
5818
5819 // See if we can trivially sink this instruction to its user if we can
5820 // prove that the successor is not executed more frequently than our block.
5821 // Return the UserBlock if successful.
5822 auto getOptionalSinkBlockForInst =
5823 [this](Instruction *I) -> std::optional<BasicBlock *> {
5824 if (!EnableCodeSinking)
5825 return std::nullopt;
5826
5827 BasicBlock *BB = I->getParent();
5828 BasicBlock *UserParent = nullptr;
5829 unsigned NumUsers = 0;
5830
5831 for (Use &U : I->uses()) {
5832 User *User = U.getUser();
5833 if (User->isDroppable()) {
5834 // Do not sink if there are dereferenceable assumes that would be
5835 // removed.
5836 if (auto *II = dyn_cast<IntrinsicInst>(User))
5837 if (II->getIntrinsicID() != Intrinsic::assume ||
5838 !II->getOperandBundle("dereferenceable"))
5839 continue;
5840 }
5841
5842 if (NumUsers > MaxSinkNumUsers)
5843 return std::nullopt;
5844
5845 Instruction *UserInst = cast<Instruction>(User);
5846 // Special handling for Phi nodes - get the block the use occurs in.
5847 BasicBlock *UserBB = UserInst->getParent();
5848 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5849 UserBB = PN->getIncomingBlock(U);
5850 // Bail out if we have uses in different blocks. We don't do any
5851 // sophisticated analysis (i.e. finding NearestCommonDominator of these
5852 // use blocks).
5853 if (UserParent && UserParent != UserBB)
5854 return std::nullopt;
5855 UserParent = UserBB;
5856
5857 // Make sure these checks are done only once, naturally we do the checks
5858 // the first time we get the userparent, this will save compile time.
5859 if (NumUsers == 0) {
5860 // Try sinking to another block. If that block is unreachable, then do
5861 // not bother. SimplifyCFG should handle it.
5862 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5863 return std::nullopt;
5864
5865 auto *Term = UserParent->getTerminator();
5866 // See if the user is one of our successors that has only one
5867 // predecessor, so that we don't have to split the critical edge.
5868 // Another option where we can sink is a block that ends with a
5869 // terminator that does not pass control to other block (such as
5870 // return or unreachable or resume). In this case:
5871 // - I dominates the User (by SSA form);
5872 // - the User will be executed at most once.
5873 // So sinking I down to User is always profitable or neutral.
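// Illustrative sketch (not part of the original source): with
//   BB:  %v = add i32 %a, %b
//        br i1 %c, label %UserParent, label %Other
// sinking %v is allowed when BB is %UserParent's unique predecessor, or
// when %UserParent ends in a no-successor terminator such as 'ret' or
// 'unreachable', since then the user executes at most once.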
5874 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5875 return std::nullopt;
5876
5877 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5878 }
5879
5880 NumUsers++;
5881 }
5882
5883 // No user or only has droppable users.
5884 if (!UserParent)
5885 return std::nullopt;
5886
5887 return UserParent;
5888 };
5889
5890 auto OptBB = getOptionalSinkBlockForInst(I);
5891 if (OptBB) {
5892 auto *UserParent = *OptBB;
5893 // Okay, the CFG is simple enough, try to sink this instruction.
5894 if (tryToSinkInstruction(I, UserParent)) {
5895 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5896 MadeIRChange = true;
5897 // We'll add uses of the sunk instruction below, but since
5898 // sinking can expose opportunities for its *operands*, add
5899 // them to the worklist.
5900 for (Use &U : I->operands())
5901 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5902 Worklist.push(OpI);
5903 }
5904 }
5905
5906 // Now that we have an instruction, try combining it to simplify it.
5907 Builder.SetInsertPoint(I);
5908 Builder.CollectMetadataToCopy(
5909 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5910
5911#ifndef NDEBUG
5912 std::string OrigI;
5913#endif
5914 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5915 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5916
5917 if (Instruction *Result = visit(*I)) {
5918 ++NumCombined;
5919 // Should we replace the old instruction with a new one?
5920 if (Result != I) {
5921 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5922 << " New = " << *Result << '\n');
5923
5924 // We copy the old instruction's DebugLoc to the new instruction, unless
5925 // InstCombine already assigned a DebugLoc to it, in which case we
5926 // should trust the more specifically selected DebugLoc.
5927 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5928 // We also copy annotation metadata to the new instruction.
5929 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5930 // Everything uses the new instruction now.
5931 I->replaceAllUsesWith(Result);
5932
5933 // Move the name to the new instruction first.
5934 Result->takeName(I);
5935
5936 // Insert the new instruction into the basic block...
5937 BasicBlock *InstParent = I->getParent();
5938 BasicBlock::iterator InsertPos = I->getIterator();
5939
5940 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5941 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5942 // We need to fix up the insertion point.
5943 if (isa<PHINode>(I)) // PHI -> Non-PHI
5944 InsertPos = InstParent->getFirstInsertionPt();
5945 else // Non-PHI -> PHI
5946 InsertPos = InstParent->getFirstNonPHIIt();
5947 }
5948
5949 Result->insertInto(InstParent, InsertPos);
5950
5951 // Push the new instruction and any users onto the worklist.
5952 Worklist.pushUsersToWorkList(*Result);
5953 Worklist.push(Result);
5954
5956 } else {
5957 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5958 << " New = " << *I << '\n');
5959
5960 // If the instruction was modified, it's possible that it is now dead.
5961 // if so, remove it.
5962 if (isInstructionTriviallyDead(I, &TLI)) {
5963 eraseInstFromFunction(*I);
5964 } else {
5965 Worklist.pushUsersToWorkList(*I);
5966 Worklist.push(I);
5967 }
5968 }
5969 MadeIRChange = true;
5970 }
5971 }
5972
5973 Worklist.zap();
5974 return MadeIRChange;
5975}
5976
5977// Track the scopes used by !alias.scope and !noalias. In a function, a
5978// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5979// by both sets. If not, the declaration of the scope can be safely omitted.
5980// The MDNode of the scope can be omitted as well for the instructions that are
5981// part of this function. We do not do that at this point, as this might become
5982// too time consuming to do.
5983 class AliasScopeTracker {
5984 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5985 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5986
5987public:
5988 void analyse(Instruction *I) {
5989 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5990 if (!I->hasMetadataOtherThanDebugLoc())
5991 return;
5992
5993 auto Track = [](Metadata *ScopeList, auto &Container) {
5994 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5995 if (!MDScopeList || !Container.insert(MDScopeList).second)
5996 return;
5997 for (const auto &MDOperand : MDScopeList->operands())
5998 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5999 Container.insert(MDScope);
6000 };
6001
6002 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
6003 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
6004 }
6005
6006 bool isNoAliasScopeDeclDead(Instruction *Inst) {
6007 const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
6008 if (!Decl)
6009 return false;
6010
6011 assert(Decl->use_empty() &&
6012 "llvm.experimental.noalias.scope.decl in use ?");
6013 const MDNode *MDSL = Decl->getScopeList();
6014 assert(MDSL->getNumOperands() == 1 &&
6015 "llvm.experimental.noalias.scope should refer to a single scope");
6016 auto &MDOperand = MDSL->getOperand(0);
6017 if (auto *MD = dyn_cast<MDNode>(MDOperand))
6018 return !UsedAliasScopesAndLists.contains(MD) ||
6019 !UsedNoAliasScopesAndLists.contains(MD);
6020
6021 // Not an MDNode ? throw away.
6022 return true;
6023 }
6024};
6025
6026/// Populate the IC worklist from a function, by walking it in reverse
6027/// post-order and adding all reachable code to the worklist.
6028///
6029/// This has a couple of tricks to make the code faster and more powerful. In
6030/// particular, we constant fold and DCE instructions as we go, to avoid adding
6031/// them to the worklist (this significantly speeds up instcombine on code where
6032/// many instructions are dead or constant). Additionally, if we find a branch
6033/// whose condition is a known constant, we only visit the reachable successors.
6034 bool InstCombinerImpl::prepareWorklist(Function &F) {
6035 bool MadeIRChange = false;
6036 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
6037 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
6038 DenseMap<Constant *, Constant *> FoldedConstants;
6039 AliasScopeTracker SeenAliasScopes;
6040
6041 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
6042 for (BasicBlock *Succ : successors(BB))
6043 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
6044 for (PHINode &PN : Succ->phis())
6045 for (Use &U : PN.incoming_values())
6046 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
6047 U.set(PoisonValue::get(PN.getType()));
6048 MadeIRChange = true;
6049 }
6050 };
6051
6052 for (BasicBlock *BB : RPOT) {
6053 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
6054 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
6055 })) {
6056 HandleOnlyLiveSuccessor(BB, nullptr);
6057 continue;
6058 }
6059 LiveBlocks.insert(BB);
6060
6061 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
6062 // ConstantProp instruction if trivially constant.
6063 if (!Inst.use_empty() &&
6064 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
6065 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
6066 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
6067 << '\n');
6068 Inst.replaceAllUsesWith(C);
6069 ++NumConstProp;
6070 if (isInstructionTriviallyDead(&Inst, &TLI))
6071 Inst.eraseFromParent();
6072 MadeIRChange = true;
6073 continue;
6074 }
6075
6076 // See if we can constant fold its operands.
6077 for (Use &U : Inst.operands()) {
6078 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
6079 continue;
6080
6081 auto *C = cast<Constant>(U);
6082 Constant *&FoldRes = FoldedConstants[C];
6083 if (!FoldRes)
6084 FoldRes = ConstantFoldConstant(C, DL, &TLI);
6085
6086 if (FoldRes != C) {
6087 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
6088 << "\n Old = " << *C
6089 << "\n New = " << *FoldRes << '\n');
6090 U = FoldRes;
6091 MadeIRChange = true;
6092 }
6093 }
6094
6095 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
6096 // these call instructions consumes non-trivial amount of time and
6097 // provides no value for the optimization.
6098 if (!Inst.isDebugOrPseudoInst()) {
6099 InstrsForInstructionWorklist.push_back(&Inst);
6100 SeenAliasScopes.analyse(&Inst);
6101 }
6102 }
6103
6104 // If this is a branch or switch on a constant, mark only the single
6105 // live successor. Otherwise assume all successors are live.
6106 Instruction *TI = BB->getTerminator();
6107 if (CondBrInst *BI = dyn_cast<CondBrInst>(TI)) {
6108 if (isa<UndefValue>(BI->getCondition())) {
6109 // Branch on undef is UB.
6110 HandleOnlyLiveSuccessor(BB, nullptr);
6111 continue;
6112 }
6113 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
6114 bool CondVal = Cond->getZExtValue();
6115 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6116 continue;
6117 }
6118 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
6119 if (isa<UndefValue>(SI->getCondition())) {
6120 // Switch on undef is UB.
6121 HandleOnlyLiveSuccessor(BB, nullptr);
6122 continue;
6123 }
6124 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
6125 HandleOnlyLiveSuccessor(BB,
6126 SI->findCaseValue(Cond)->getCaseSuccessor());
6127 continue;
6128 }
6129 }
6130 }
6131
6132 // Remove instructions inside unreachable blocks. This prevents the
6133 // instcombine code from having to deal with some bad special cases, and
6134 // reduces use counts of instructions.
6135 for (BasicBlock &BB : F) {
6136 if (LiveBlocks.count(&BB))
6137 continue;
6138
6139 unsigned NumDeadInstInBB;
6140 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
6141
6142 MadeIRChange |= NumDeadInstInBB != 0;
6143 NumDeadInst += NumDeadInstInBB;
6144 }
6145
6146 // Once we've found all of the instructions to add to instcombine's worklist,
6147 // add them in reverse order. This way instcombine will visit from the top
6148 // of the function down. This jives well with the way that it adds all uses
6149 // of instructions to the worklist after doing a transformation, thus avoiding
6150 // some N^2 behavior in pathological cases.
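// Illustrative sketch (not part of the original source): for the chain
//   %a = add i32 %x, 1
//   %b = mul i32 %a, 2
// pushing in reverse order means %a is popped and simplified before %b,
// so %b already sees %a's final form when it is visited.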
6151 Worklist.reserve(InstrsForInstructionWorklist.size());
6152 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
6153 // DCE instruction if trivially dead. As we iterate in reverse program
6154 // order here, we will clean up whole chains of dead instructions.
6155 if (isInstructionTriviallyDead(Inst, &TLI) ||
6156 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
6157 ++NumDeadInst;
6158 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
6159 salvageDebugInfo(*Inst);
6160 Inst->eraseFromParent();
6161 MadeIRChange = true;
6162 continue;
6163 }
6164
6165 Worklist.push(Inst);
6166 }
6167
6168 return MadeIRChange;
6169}
6170
6171 void InstCombiner::computeBackEdges() {
6172 // Collect backedges.
6173 SmallVector<bool> Visited(F.getMaxBlockNumber());
6174 for (BasicBlock *BB : RPOT) {
6175 Visited[BB->getNumber()] = true;
6176 for (BasicBlock *Succ : successors(BB))
6177 if (Visited[Succ->getNumber()])
6178 BackEdges.insert({BB, Succ});
6179 }
6180 ComputedBackEdges = true;
6181}
6182
6183 static bool combineInstructionsOverFunction(
6184 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
6185 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
6186 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
6187 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
6188 const InstCombineOptions &Opts) {
6189 auto &DL = F.getDataLayout();
6190 bool VerifyFixpoint = Opts.VerifyFixpoint &&
6191 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
6192
6193 /// Builder - This is an IRBuilder that automatically inserts new
6194 /// instructions into the worklist when they are created.
6195 IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
6196 F.getContext(), TargetFolder(DL),
6197 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
6198 Worklist.add(I);
6199 if (auto *Assume = dyn_cast<AssumeInst>(I))
6200 AC.registerAssumption(Assume);
6201 }));
6202
6203 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
6204
6205 // Lower dbg.declare intrinsics otherwise their value may be clobbered
6206 // by instcombiner.
6207 bool MadeIRChange = false;
6208 if (ShouldLowerDbgDeclare)
6209 MadeIRChange = LowerDbgDeclare(F);
6210
6211 // Iterate while there is work to do.
6212 unsigned Iteration = 0;
6213 while (true) {
6214 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
6215 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
6216 << " on " << F.getName()
6217 << " reached; stopping without verifying fixpoint\n");
6218 break;
6219 }
6220
6221 ++Iteration;
6222 ++NumWorklistIterations;
6223 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6224 << F.getName() << "\n");
6225
6226 InstCombinerImpl IC(Worklist, Builder, F, AA, AC, TLI, TTI, DT, ORE, BFI,
6227 BPI, PSI, DL, RPOT);
6228 IC.MaxArraySizeForCombine = MaxArraySize;
6229 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6230 MadeChangeInThisIteration |= IC.run();
6231 if (!MadeChangeInThisIteration)
6232 break;
6233
6234 MadeIRChange = true;
6235 if (Iteration > Opts.MaxIterations) {
6236 report_fatal_error(
6237 "Instruction Combining on " + Twine(F.getName()) +
6238 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6239 " iterations. " +
6240 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6241 "'instcombine-no-verify-fixpoint' to suppress this error.");
6242 }
6243 }
6244
6245 if (Iteration == 1)
6246 ++NumOneIteration;
6247 else if (Iteration == 2)
6248 ++NumTwoIterations;
6249 else if (Iteration == 3)
6250 ++NumThreeIterations;
6251 else
6252 ++NumFourOrMoreIterations;
6253
6254 return MadeIRChange;
6255}
6256
6257 InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
6258
6259 void InstCombinePass::printPipeline(
6260 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6261 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6262 OS, MapClassName2PassName);
6263 OS << '<';
6264 OS << "max-iterations=" << Options.MaxIterations << ";";
6265 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6266 OS << '>';
6267}
6268
6269char InstCombinePass::ID = 0;
6270
6271 PreservedAnalyses InstCombinePass::run(Function &F,
6272 FunctionAnalysisManager &AM) {
6273 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6274 // No changes since last InstCombine pass, exit early.
6275 if (LRT.shouldSkip(&ID))
6276 return PreservedAnalyses::all();
6277
6278 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6279 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6280 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6281 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6282 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6283
6284 auto *AA = &AM.getResult<AAManager>(F);
6285 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6286 ProfileSummaryInfo *PSI =
6287 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6288 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6289 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6290 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
6291
6292 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6293 BFI, BPI, PSI, Options)) {
6294 // No changes, all analyses are preserved.
6295 LRT.update(&ID, /*Changed=*/false);
6296 return PreservedAnalyses::all();
6297 }
6298
6299 // Mark all the analyses that instcombine updates as preserved.
6300 PreservedAnalyses PA;
6301 LRT.update(&ID, /*Changed=*/true);
6302 PA.preserve<LastRunTrackingAnalysis>();
6303 PA.preserveSet<CFGAnalyses>();
6304 return PA;
6305}
6306
6307 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
6308 AU.setPreservesCFG();
6309 AU.addRequired<AAResultsWrapperPass>();
6310 AU.addRequired<AssumptionCacheTracker>();
6311 AU.addRequired<TargetLibraryInfoWrapperPass>();
6312 AU.addRequired<TargetTransformInfoWrapperPass>();
6313 AU.addRequired<DominatorTreeWrapperPass>();
6314 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
6315 AU.addPreserved<DominatorTreeWrapperPass>();
6316 AU.addPreserved<AAResultsWrapperPass>();
6317 AU.addPreserved<BasicAAWrapperPass>();
6318 AU.addPreserved<GlobalsAAWrapperPass>();
6319 AU.addRequired<ProfileSummaryInfoWrapperPass>();
6320 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
6321 }
6322
6323 bool InstructionCombiningPass::runOnFunction(Function &F) {
6324 if (skipFunction(F))
6325 return false;
6326
6327 // Required analyses.
6328 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6329 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6330 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6331 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6332 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6333 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6334
6335 // Optional analyses.
6336 ProfileSummaryInfo *PSI =
6337 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6338 BlockFrequencyInfo *BFI =
6339 (PSI && PSI->hasProfileSummary()) ?
6340 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6341 nullptr;
6342 BranchProbabilityInfo *BPI = nullptr;
6343 if (auto *WrapperPass =
6344 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
6345 BPI = &WrapperPass->getBPI();
6346
6347 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6348 BFI, BPI, PSI, InstCombineOptions());
6349}
6350
6351 char InstructionCombiningPass::ID = 0;
6352
6353 InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {}
6354
6355 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
6356 "Combine redundant instructions", false, false)
6357 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
6358 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
6359 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
6360 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
6361 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
6362 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
6363 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
6364 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
6365 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
6366 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
6367 "Combine redundant instructions", false, false)
6368
6369// Initialization Routines.
6370 void llvm::initializeInstCombine(PassRegistry &Registry) {
6371 initializeInstructionCombiningPassPass(Registry);
6372 }
6373
6374 FunctionPass *llvm::createInstructionCombiningPass() {
6375 return new InstructionCombiningPass();
6376 }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This is the interface for LLVM's primary stateless and local alias analysis.
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "(X ROp Y) LOp Z" is always equal to "(X LOp Z) ROp (Y LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static Constant * constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector, Constant *Splat, bool SplatLHS, const DataLayout &DL)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
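Schematically, in the spirit of the examples in the file header (IR shapes assumed):

// %a = getelementptr i8, ptr %p, i64 16
// %b = getelementptr i8, ptr %a, i64 %var
// %c = getelementptr i8, ptr %b, i64 8
// can be rewritten so the two constants combine:
// %a = getelementptr i8, ptr %p, i64 24
// %c = getelementptr i8, ptr %a, i64 %var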
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
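Schematically (an assumed shape, not lifted verbatim from the implementation):

// %p = select i1 %c, ptr %a, ptr %b
// %g = getelementptr i8, ptr %p, i64 4
// becomes
// %ga = getelementptr i8, ptr %a, i64 4
// %gb = getelementptr i8, ptr %b, i64 4
// %g  = select i1 %c, ptr %ga, ptr %gb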
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static Instruction * foldSpliceBinOp(BinaryOperator &Inst, InstCombiner::BuilderTy &Builder)
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of the associative operations.
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
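For example (illustrative):

// %g = getelementptr i32, ptr %p, i64 %i
// becomes an i8 GEP with the element scaling made explicit:
// %off = mul i64 %i, 4
// %g   = getelementptr i8, ptr %p, i64 %off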
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns the identity value for the given opcode, which can be used to factor patterns like (X * 2) + X into (X * 2) + (X * 1) and then into X * (2 + 1).
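As a quick reference, these are the standard algebraic identities involved (our summary, not a quote from the code):

// add, or, xor, shl, lshr, ashr: identity is 0 (X op 0 == X)
// mul: identity is 1 (X * 1 == X)
// and: identity is all-ones (X & ~0 == X)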
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< Instruction * > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static cl::opt< unsigned > MaxAllocSiteRemovableUsers("instcombine-max-allocsite-removable-users", cl::Hidden, cl::init(2048), cl::desc("Maximum number of users to visit in alloc-site " "removability analysis"))
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
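At the source level the transform corresponds to (illustrative):

// if (p)        becomes        free(p);
//   free(p);
// free(NULL) is a no-op in C, so the null test is redundant once the only
// guarded work is the free itself.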
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
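For instance, with mul as the inner opcode (shapes assumed):

// %m1 = mul i32 %a, %b
// %m2 = mul i32 %a, %c
// %r  = add i32 %m1, %m2
// =>
// %bc = add i32 %b, %c
// %r  = mul i32 %a, %bc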
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible effect is storing to V.
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
This file contains the declarations for metadata subclasses.
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metrics from passes.
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:344
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:214
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:235
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1809
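A minimal usage sketch (our example): one call produces both the quotient and the remainder.

#include "llvm/ADT/APInt.h"

void divmodExample() {
  llvm::APInt A(32, 17), B(32, 5);
  llvm::APInt Q(32, 0), R(32, 0);
  llvm::APInt::udivrem(A, B, Q, R); // Q == 3, R == 2
}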
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:424
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1941
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:967
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:372
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:381
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1979
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:834
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:2011
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:406
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:335
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:441
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1992
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:858
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:39
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:217
size_t size() const
Get the array size.
Definition ArrayRef.h:140
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:482
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst); holds everything related to calling a function.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
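The two operations are easy to confuse; illustratively (predicate values assumed from the tables above):

// inverse(ult) == uge : X <u Y is the negation of X >=u Y
// swapped(ult) == ugt : X <u Y states the same fact as Y >u X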
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Conditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
ConstantArray - Constant Array Declarations.
Definition Constants.h:579
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:938
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
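An illustrative use (our example): the exact set of i8 values X for which X + 100 cannot wrap in the signed sense. Since X + 100 <= 127 requires X <= 27, the region is [-128, 28).

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Operator.h"

llvm::ConstantRange NSWAddRegion = llvm::ConstantRange::makeExactNoWrapRegion(
    llvm::Instruction::Add, llvm::APInt(8, 100),
    llvm::OverflowingBinaryOperator::NoSignedWrap);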
Constant Vector Declarations.
Definition Constants.h:663
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
bool empty() const
Definition DenseMap.h:109
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:239
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:314
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:809
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2086
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:544
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2853
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * visitCondBrInst(CondBrInst &BI)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0, see if we can fold the instruction into the PHI (which is only possible if all operands to the PHI are constants).
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, const SimplifyQuery &Q, unsigned Depth=0)
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
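After the transform a PHI selects the stored value in the common successor and a single store remains (shapes assumed):

// %v = phi i32 [ %v1, %then ], [ %v2, %else ]
// store i32 %v, ptr %P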
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * visitUncondBrInst(UncondBrInst &BI)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infinite combine loops.
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not belong to a module.
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1080
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1444
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1450
Tracking metadata reference owned by Metadata.
Definition Metadata.h:902
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:64
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and intitialization of the pass subsystem as appli...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior when operating on undefined operands.
Definition Constants.h:1660
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:116
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:134
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:184
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Represent a constant reference to a string, i.e. a character array and a length.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:65
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:311
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:328
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:110
Unconditional Branch instruction.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:254
op_range operands()
Definition User.h:267
op_iterator op_begin()
Definition User.h:259
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be dropped rather than preventing a transformation from happening.
Definition User.cpp:119
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:25
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
op_iterator op_end()
Definition User.h:261
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
Definition Value.h:737
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:162
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:146
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
bool use_empty() const
Definition Value.h:346
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:890
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:126
self_iterator getIterator()
Definition ilist_node.h:123
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
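A minimal sketch of how these matchers compose (the helper name isSelfAdd is ours):

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

// Returns true for "add X, X"; m_Deferred reuses the first binding of X.
bool isSelfAdd(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  return match(V, m_Add(m_Value(X), m_Deferred(X)));
}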
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
auto m_Poison()
Match an arbitrary poison constant.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_Constant()
Match an arbitrary Constant and ignore it.
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SelectLike_match< CondTy, LTy, RTy > m_SelectLike(const CondTy &C, const LTy &TrueC, const RTy &FalseC)
Matches a value that behaves like a boolean-controlled select, i.e.
auto m_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
m_Intrinsic_Ty< Opnd0 >::Ty m_Ctpop(const Opnd0 &Op0)
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
initializer< Ty > init(const Ty &Val)
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:315
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
void stable_sort(R &&Range)
Definition STLExtras.h:2115
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
cl::opt< bool > ProfcheckDisableMetadataFixes
Definition LoopInfo.cpp:60
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2500
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:153
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2207
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:633
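A sketch of the classic use, deleting instructions while walking a block (eraseTriviallyDead is a hypothetical helper; isInstructionTriviallyDead is declared later in this index):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/Transforms/Utils/Local.h"
  using namespace llvm;

  static void eraseTriviallyDead(BasicBlock &BB) {
    // Safe to erase I here: the iterator advanced before the body ran.
    for (Instruction &I : make_early_inc_range(BB))
      if (isInstructionTriviallyDead(&I))
        I.eraseFromParent();
  }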
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1652
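For illustration, assuming IEEE double semantics (values arbitrary):

  #include "llvm/ADT/APFloat.h"
  using namespace llvm;

  APFloat X(12.0);
  int Exp;
  APFloat Mant = frexp(X, Exp, APFloat::rmNearestTiesToEven);
  // Mant == 0.75 and Exp == 4, since 12.0 == 0.75 * 2^4.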
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
Definition Local.cpp:2483
int countr_zero(T Val)
Count the number of 0 bits from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:204
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
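A hedged sketch (isAddRecurrence is a hypothetical helper): for a loop induction variable i that starts at Start and is updated by a binary operator each iteration, PN is the phi for i.

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static bool isAddRecurrence(PHINode *PN) {
    BinaryOperator *BO;
    Value *Start, *Step;
    return matchSimpleRecurrence(PN, BO, Start, Step) &&
           BO->getOpcode() == Instruction::Add;
  }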
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:149
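Together with countr_zero above, this is the usual power-of-two check; a minimal sketch with arbitrary values:

  #include "llvm/ADT/bit.h"
  #include <cstdint>

  uint64_t C = 64;
  if (llvm::has_single_bit(C)) {
    // For a power of two, x * C can be rewritten as x << ShAmt.
    int ShAmt = llvm::countr_zero(C); // 6, since 64 == 1 << 6
  }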
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into an appropriate set of dbg.value records.
Definition Local.cpp:1810
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg.declare record.
Definition Local.cpp:1677
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users of I.
Definition Local.cpp:2052
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2429
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:322
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
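A hedged sketch of a typical call (trySimplifyAdd is a hypothetical helper; the caller supplies the SimplifyQuery):

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // A non-null result is a simpler value equivalent to `LHS + RHS`.
  static Value *trySimplifyAdd(Value *LHS, Value *RHS,
                               const SimplifyQuery &SQ) {
    return simplifyBinOp(Instruction::Add, LHS, RHS, SQ);
  }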
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Variant of isSafeToSpeculativelyExecute that does not use information from the instruction's non-constant operands, for use when those operands are about to be replaced.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors (including the next instruction that follows within a basic block).
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined value of C.
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
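A small sketch pairing this with setBranchWeights above (copyWeights is a hypothetical helper):

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/ProfDataUtils.h"
  using namespace llvm;

  // Copy branch weights from one conditional branch to another, if present.
  static void copyWeights(const BranchInst &From, BranchInst &To) {
    SmallVector<uint32_t, 2> Weights;
    if (extractBranchWeights(From.getMetadata(LLVMContext::MD_prof), Weights))
      setBranchWeights(To, Weights, /*IsExpected=*/false);
  }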
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:2018
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2145
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
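A minimal sketch (isBasedOnAlloca is a hypothetical helper):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Walk GEPs and casts back to the object a pointer is derived from.
  static bool isBasedOnAlloca(const Value *Ptr) {
    return isa<AllocaInst>(getUnderlyingObject(Ptr));
  }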
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS, false if it is known to be implied false, and std::nullopt if the implication cannot be determined.
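A hedged sketch (impliesWhenTrue is a hypothetical wrapper):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/DataLayout.h"
  #include <optional>
  using namespace llvm;

  // Given that A is true, is B known true (true), known false (false),
  // or undetermined (std::nullopt)?
  static std::optional<bool> impliesWhenTrue(const Value *A, const Value *B,
                                             const DataLayout &DL) {
    return isImpliedCondition(A, B, DL, /*LHSIsTrue=*/true);
  }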
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:876
#define N
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:265
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:262
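For illustration, a small sketch of these accessors (the known-bits pattern is arbitrary):

  #include "llvm/Support/KnownBits.h"
  using namespace llvm;

  KnownBits Known(8);          // 8-bit value, nothing known yet
  Known.Zero.setHighBits(4);   // now the top four bits are known zero
  unsigned BW = Known.getBitWidth();             // 8
  unsigned LeadZ = Known.countMinLeadingZeros(); // 4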
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:89
SimplifyQuery getWithInstruction(const Instruction *I) const