InstructionCombining.cpp
1//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// InstructionCombining - Combine instructions to form fewer, simple
10// instructions. This pass does not modify the CFG. This pass is where
11// algebraic simplification happens.
12//
13// This pass combines things like:
14// %Y = add i32 %X, 1
15// %Z = add i32 %Y, 1
16// into:
17// %Z = add i32 %X, 2
18//
19// This is a simple worklist driven algorithm.
20//
21// This pass guarantees that the following canonicalizations are performed on
22// the program:
23// 1. If a binary operator has a constant operand, it is moved to the RHS
24// 2. Bitwise operators with constant operands are always grouped so that
25// shifts are performed first, then or's, then and's, then xor's.
26// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27// 4. All cmp instructions on boolean values are replaced with logical ops
28// 5. add X, X is represented as (X*2) => (X << 1)
29// 6. Multiplies with a power-of-two constant argument are transformed into
30// shifts.
31// ... etc.
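// For example (illustrative), canonicalization 6 rewrites:
//    %Y = mul i32 %X, 8
// into:
//    %Y = shl i32 %X, 3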
32//
33//===----------------------------------------------------------------------===//
34
35#include "InstCombineInternal.h"
36#include "llvm/ADT/APFloat.h"
37#include "llvm/ADT/APInt.h"
38#include "llvm/ADT/ArrayRef.h"
39#include "llvm/ADT/DenseMap.h"
42#include "llvm/ADT/Statistic.h"
47#include "llvm/Analysis/CFG.h"
62#include "llvm/IR/BasicBlock.h"
63#include "llvm/IR/CFG.h"
64#include "llvm/IR/Constant.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DIBuilder.h"
67#include "llvm/IR/DataLayout.h"
68#include "llvm/IR/DebugInfo.h"
70#include "llvm/IR/Dominators.h"
72#include "llvm/IR/Function.h"
74#include "llvm/IR/IRBuilder.h"
75#include "llvm/IR/InstrTypes.h"
76#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Intrinsics.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/Operator.h"
82#include "llvm/IR/PassManager.h"
84#include "llvm/IR/Type.h"
85#include "llvm/IR/Use.h"
86#include "llvm/IR/User.h"
87#include "llvm/IR/Value.h"
88#include "llvm/IR/ValueHandle.h"
93#include "llvm/Support/Debug.h"
102#include <algorithm>
103#include <cassert>
104#include <cstdint>
105#include <memory>
106#include <optional>
107#include <string>
108#include <utility>
109
110#define DEBUG_TYPE "instcombine"
112#include <optional>
113
114using namespace llvm;
115using namespace llvm::PatternMatch;
116
117STATISTIC(NumWorklistIterations,
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration, "Number of functions with one iteration");
120STATISTIC(NumTwoIterations, "Number of functions with two iterations");
121STATISTIC(NumThreeIterations, "Number of functions with three iterations");
122STATISTIC(NumFourOrMoreIterations,
123 "Number of functions with four or more iterations");
124
125STATISTIC(NumCombined , "Number of insts combined");
126STATISTIC(NumConstProp, "Number of constant folds");
127STATISTIC(NumDeadInst , "Number of dead inst eliminated");
128STATISTIC(NumSunkInst , "Number of instructions sunk");
129STATISTIC(NumExpand, "Number of expansions");
130STATISTIC(NumFactor , "Number of factorizations");
131STATISTIC(NumReassoc , "Number of reassociations");
132DEBUG_COUNTER(VisitCounter, "instcombine-visit",
133 "Controls which instructions are visited");
134
135static cl::opt<bool>
136EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
137 cl::init(true));
138
140 "instcombine-max-sink-users", cl::init(32),
141 cl::desc("Maximum number of undroppable users for instruction sinking"));
142
143static cl::opt<unsigned>
144MaxArraySize("instcombine-maxarray-size", cl::init(1024),
145 cl::desc("Maximum array size considered when doing a combine"));
146
147// FIXME: Remove this flag when it is no longer necessary to convert
148// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
149// increases variable availability at the cost of accuracy. Variables that
150// cannot be promoted by mem2reg or SROA will be described as living in memory
151// for their entire lifetime. However, passes like DSE and instcombine can
152// delete stores to the alloca, leading to misleading and inaccurate debug
153// information. This flag can be removed when those passes are fixed.
154static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
155 cl::Hidden, cl::init(true));
156
157std::optional<Instruction *>
159 // Handle target specific intrinsics
160 if (II.getCalledFunction()->isTargetIntrinsic()) {
161 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*this, II);
162 }
163 return std::nullopt;
164}
165
167 IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
168 bool &KnownBitsComputed) {
169 // Handle target specific intrinsics
170 if (II.getCalledFunction()->isTargetIntrinsic()) {
171 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
172 *this, II, DemandedMask, Known, KnownBitsComputed);
173 }
174 return std::nullopt;
175}
176
178 IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
179 APInt &PoisonElts2, APInt &PoisonElts3,
180 std::function<void(Instruction *, unsigned, APInt, APInt &)>
181 SimplifyAndSetOp) {
182 // Handle target specific intrinsics
183 if (II.getCalledFunction()->isTargetIntrinsic()) {
184 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
185 *this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
186 SimplifyAndSetOp);
187 }
188 return std::nullopt;
189}
190
191bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
192 // Approved exception for TTI use: This queries a legality property of the
193 // target, not a profitability heuristic. Ideally this should be part of
194 // DataLayout instead.
195 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
196}
197
198Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {
199 if (!RewriteGEP)
200 return llvm::emitGEPOffset(&Builder, DL, GEP);
201
202 IRBuilderBase::InsertPointGuard Guard(Builder);
203 auto *Inst = dyn_cast<Instruction>(GEP);
204 if (Inst)
205 Builder.SetInsertPoint(Inst);
206
207 Value *Offset = EmitGEPOffset(GEP);
208 // Rewrite non-trivial GEPs to avoid duplicating the offset arithmetic.
209 if (Inst && !GEP->hasAllConstantIndices() &&
210 !GEP->getSourceElementType()->isIntegerTy(8)) {
212 *Inst, Builder.CreateGEP(Builder.getInt8Ty(), GEP->getPointerOperand(),
213 Offset, "", GEP->getNoWrapFlags()));
215 }
216 return Offset;
217}
218
219Value *InstCombinerImpl::EmitGEPOffsets(ArrayRef<GEPOperator *> GEPs,
220 GEPNoWrapFlags NW, Type *IdxTy,
221 bool RewriteGEPs) {
222 auto Add = [&](Value *Sum, Value *Offset) -> Value * {
223 if (Sum)
224 return Builder.CreateAdd(Sum, Offset, "", NW.hasNoUnsignedWrap(),
225 NW.isInBounds());
226 else
227 return Offset;
228 };
229
230 Value *Sum = nullptr;
231 Value *OneUseSum = nullptr;
232 Value *OneUseBase = nullptr;
233 GEPNoWrapFlags OneUseFlags = GEPNoWrapFlags::all();
234 for (GEPOperator *GEP : reverse(GEPs)) {
235 Value *Offset;
236 {
237 // Expand the offset at the point of the previous GEP to enable rewriting.
238 // However, use the original insertion point for calculating Sum.
239 IRBuilderBase::InsertPointGuard Guard(Builder);
240 auto *Inst = dyn_cast<Instruction>(GEP);
241 if (RewriteGEPs && Inst)
242 Builder.SetInsertPoint(Inst);
243
245 if (Offset->getType() != IdxTy)
246 Offset = Builder.CreateVectorSplat(
247 cast<VectorType>(IdxTy)->getElementCount(), Offset);
248 if (GEP->hasOneUse()) {
249 // Offsets of one-use GEPs will be merged into the next multi-use GEP.
250 OneUseSum = Add(OneUseSum, Offset);
251 OneUseFlags = OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags());
252 if (!OneUseBase)
253 OneUseBase = GEP->getPointerOperand();
254 continue;
255 }
256
257 if (OneUseSum)
258 Offset = Add(OneUseSum, Offset);
259
260 // Rewrite the GEP to reuse the computed offset. This also includes
261 // offsets from preceding one-use GEPs.
262 if (RewriteGEPs && Inst &&
263 !(GEP->getSourceElementType()->isIntegerTy(8) &&
264 GEP->getOperand(1) == Offset)) {
266 *Inst,
267 Builder.CreatePtrAdd(
268 OneUseBase ? OneUseBase : GEP->getPointerOperand(), Offset, "",
269 OneUseFlags.intersectForOffsetAdd(GEP->getNoWrapFlags())));
271 }
272 }
273
274 Sum = Add(Sum, Offset);
275 OneUseSum = OneUseBase = nullptr;
276 OneUseFlags = GEPNoWrapFlags::all();
277 }
278 if (OneUseSum)
279 Sum = Add(Sum, OneUseSum);
280 if (!Sum)
281 return Constant::getNullValue(IdxTy);
282 return Sum;
283}
284
285/// Legal integers and common types are considered desirable. This is used to
286/// avoid creating instructions with types that may not be supported well by
287/// the backend.
288/// NOTE: This treats i8, i16 and i32 specially because they are common
289/// types in frontend languages.
290bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
291 switch (BitWidth) {
292 case 8:
293 case 16:
294 case 32:
295 return true;
296 default:
297 return DL.isLegalInteger(BitWidth);
298 }
299}
300
301/// Return true if it is desirable to convert an integer computation from a
302/// given bit width to a new bit width.
303/// We don't want to convert from a legal or desirable type (like i8) to an
304/// illegal type or from a smaller to a larger illegal type. A width of '1'
305/// is always treated as a desirable type because i1 is a fundamental type in
306/// IR, and there are many specialized optimizations for i1 types.
307/// Common/desirable widths are equally treated as legal to convert to, in
308/// order to open up more combining opportunities.
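/// For example (illustrative), assuming a datalayout where i64 is a legal
/// integer width: shouldChangeType(33, 32) returns true (shrinking to a
/// desirable width), while shouldChangeType(32, 33) and shouldChangeType(64, 128)
/// return false (widening to an illegal type).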
309bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
310 unsigned ToWidth) const {
311 bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
312 bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
313
314 // Convert to desirable widths even if they are not legal types.
315 // Only shrink types, to prevent infinite loops.
316 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
317 return true;
318
319 // If this is a legal or desirable integer source type, and the result would be
320 // an illegal type, don't do the transformation.
321 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
322 return false;
323
324 // Otherwise, if both are illegal, do not increase the size of the result. We
325 // do allow things like i160 -> i64, but not i64 -> i160.
326 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
327 return false;
328
329 return true;
330}
331
332/// Return true if it is desirable to convert a computation from 'From' to 'To'.
333/// We don't want to convert from a legal to an illegal type or from a smaller
334/// to a larger illegal type. i1 is always treated as a legal type because it is
335/// a fundamental type in IR, and there are many specialized optimizations for
336/// i1 types.
337bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
338 // TODO: This could be extended to allow vectors. Datalayout changes might be
339 // needed to properly support that.
340 if (!From->isIntegerTy() || !To->isIntegerTy())
341 return false;
342
343 unsigned FromWidth = From->getPrimitiveSizeInBits();
344 unsigned ToWidth = To->getPrimitiveSizeInBits();
345 return shouldChangeType(FromWidth, ToWidth);
346}
347
348// Return true if No Signed Wrap should be maintained for I.
349// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
350// where both B and C should be ConstantInts, results in a constant that does
351// not overflow. This function only handles the Add/Sub/Mul opcodes. For
352// all other opcodes, the function conservatively returns false.
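// For example (illustrative), when reassociating
//   %y = add nsw i8 %x, 100
//   %z = add nsw i8 %y, 20
// into %z = add i8 %x, 120, the constants 100 and 20 sum without signed
// overflow, so the nsw flag can be kept; with 30 instead of 20 the constant
// sum would overflow i8 and nsw would have to be dropped.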
355 if (!OBO || !OBO->hasNoSignedWrap())
356 return false;
357
358 const APInt *BVal, *CVal;
359 if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
360 return false;
361
362 // We reason about Add/Sub/Mul Only.
363 bool Overflow = false;
364 switch (I.getOpcode()) {
365 case Instruction::Add:
366 (void)BVal->sadd_ov(*CVal, Overflow);
367 break;
368 case Instruction::Sub:
369 (void)BVal->ssub_ov(*CVal, Overflow);
370 break;
371 case Instruction::Mul:
372 (void)BVal->smul_ov(*CVal, Overflow);
373 break;
374 default:
375 // Conservatively return false for other opcodes.
376 return false;
377 }
378 return !Overflow;
379}
380
383 return OBO && OBO->hasNoUnsignedWrap();
384}
385
388 return OBO && OBO->hasNoSignedWrap();
389}
390
391/// Conservatively clears subclassOptionalData after a reassociation or
392/// commutation. Fast-math flags are preserved when applicable.
396 if (!FPMO) {
397 I.clearSubclassOptionalData();
398 return;
399 }
400
401 FastMathFlags FMF = I.getFastMathFlags();
402 I.clearSubclassOptionalData();
403 I.setFastMathFlags(FMF);
404}
405
406/// Combine constant operands of associative operations either before or after a
407/// cast to eliminate one of the associative operations:
408/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
409/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
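/// For example (illustrative), with a zext cast and a bitwise 'and':
///   %a = and i8 %x, 12
///   %z = zext i8 %a to i32
///   %r = and i32 %z, 7
/// becomes:
///   %z = zext i8 %x to i32
///   %r = and i32 %z, 4        ; 12 & 7 folded in the destination type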
411 InstCombinerImpl &IC) {
412 auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
413 if (!Cast || !Cast->hasOneUse())
414 return false;
415
416 // TODO: Enhance logic for other casts and remove this check.
417 auto CastOpcode = Cast->getOpcode();
418 if (CastOpcode != Instruction::ZExt)
419 return false;
420
421 // TODO: Enhance logic for other BinOps and remove this check.
422 if (!BinOp1->isBitwiseLogicOp())
423 return false;
424
425 auto AssocOpcode = BinOp1->getOpcode();
426 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
427 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
428 return false;
429
430 Constant *C1, *C2;
431 if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
432 !match(BinOp2->getOperand(1), m_Constant(C2)))
433 return false;
434
435 // TODO: This assumes a zext cast.
436 // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
437 // to the destination type might lose bits.
438
439 // Fold the constants together in the destination type:
440 // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
441 const DataLayout &DL = IC.getDataLayout();
442 Type *DestTy = C1->getType();
443 Constant *CastC2 = ConstantFoldCastOperand(CastOpcode, C2, DestTy, DL);
444 if (!CastC2)
445 return false;
446 Constant *FoldedC = ConstantFoldBinaryOpOperands(AssocOpcode, C1, CastC2, DL);
447 if (!FoldedC)
448 return false;
449
450 IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
451 IC.replaceOperand(*BinOp1, 1, FoldedC);
453 Cast->dropPoisonGeneratingFlags();
454 return true;
455}
456
457// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
458// inttoptr ( ptrtoint (x) ) --> x
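// For example (illustrative), with 64-bit pointers in address space 0:
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr
// %q simplifies to %p, since the address spaces and the bit widths match.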
459Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
460 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
461 if (IntToPtr && DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
462 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
463 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
464 Type *CastTy = IntToPtr->getDestTy();
465 if (PtrToInt &&
466 CastTy->getPointerAddressSpace() ==
467 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
468 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
469 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
470 return PtrToInt->getOperand(0);
471 }
472 return nullptr;
473}
474
475/// This performs a few simplifications for operators that are associative or
476/// commutative:
477///
478/// Commutative operators:
479///
480/// 1. Order operands such that they are listed from right (least complex) to
481/// left (most complex). This puts constants before unary operators before
482/// binary operators.
483///
484/// Associative operators:
485///
486/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
487/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
488///
489/// Associative and commutative operators:
490///
491/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
492/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
493/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
494/// if C1 and C2 are constants.
496 Instruction::BinaryOps Opcode = I.getOpcode();
497 bool Changed = false;
498
499 do {
500 // Order operands such that they are listed from right (least complex) to
501 // left (most complex). This puts constants before unary operators before
502 // binary operators.
503 if (I.isCommutative() && getComplexity(I.getOperand(0)) <
504 getComplexity(I.getOperand(1)))
505 Changed = !I.swapOperands();
506
507 if (I.isCommutative()) {
508 if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
509 replaceOperand(I, 0, Pair->first);
510 replaceOperand(I, 1, Pair->second);
511 Changed = true;
512 }
513 }
514
515 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
516 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
517
518 if (I.isAssociative()) {
519 // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
520 if (Op0 && Op0->getOpcode() == Opcode) {
521 Value *A = Op0->getOperand(0);
522 Value *B = Op0->getOperand(1);
523 Value *C = I.getOperand(1);
524
525 // Does "B op C" simplify?
526 if (Value *V = simplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
527 // It simplifies to V. Form "A op V".
528 replaceOperand(I, 0, A);
529 replaceOperand(I, 1, V);
530 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
531 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
532
533 // Conservatively clear all optional flags since they may not be
534 // preserved by the reassociation. Reset nsw/nuw based on the above
535 // analysis.
537
538 // Note: this is only valid because SimplifyBinOp doesn't look at
539 // the operands to Op0.
540 if (IsNUW)
541 I.setHasNoUnsignedWrap(true);
542
543 if (IsNSW)
544 I.setHasNoSignedWrap(true);
545
546 Changed = true;
547 ++NumReassoc;
548 continue;
549 }
550 }
551
552 // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
553 if (Op1 && Op1->getOpcode() == Opcode) {
554 Value *A = I.getOperand(0);
555 Value *B = Op1->getOperand(0);
556 Value *C = Op1->getOperand(1);
557
558 // Does "A op B" simplify?
559 if (Value *V = simplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
560 // It simplifies to V. Form "V op C".
561 replaceOperand(I, 0, V);
562 replaceOperand(I, 1, C);
563 // Conservatively clear the optional flags, since they may not be
564 // preserved by the reassociation.
566 Changed = true;
567 ++NumReassoc;
568 continue;
569 }
570 }
571 }
572
573 if (I.isAssociative() && I.isCommutative()) {
574 if (simplifyAssocCastAssoc(&I, *this)) {
575 Changed = true;
576 ++NumReassoc;
577 continue;
578 }
579
580 // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
581 if (Op0 && Op0->getOpcode() == Opcode) {
582 Value *A = Op0->getOperand(0);
583 Value *B = Op0->getOperand(1);
584 Value *C = I.getOperand(1);
585
586 // Does "C op A" simplify?
587 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
588 // It simplifies to V. Form "V op B".
589 replaceOperand(I, 0, V);
590 replaceOperand(I, 1, B);
591 // Conservatively clear the optional flags, since they may not be
592 // preserved by the reassociation.
594 Changed = true;
595 ++NumReassoc;
596 continue;
597 }
598 }
599
600 // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
601 if (Op1 && Op1->getOpcode() == Opcode) {
602 Value *A = I.getOperand(0);
603 Value *B = Op1->getOperand(0);
604 Value *C = Op1->getOperand(1);
605
606 // Does "C op A" simplify?
607 if (Value *V = simplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
608 // It simplifies to V. Form "B op V".
609 replaceOperand(I, 0, B);
610 replaceOperand(I, 1, V);
611 // Conservatively clear the optional flags, since they may not be
612 // preserved by the reassociation.
614 Changed = true;
615 ++NumReassoc;
616 continue;
617 }
618 }
619
620 // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
621 // if C1 and C2 are constants.
622 Value *A, *B;
623 Constant *C1, *C2, *CRes;
624 if (Op0 && Op1 &&
625 Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
626 match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
627 match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2)))) &&
628 (CRes = ConstantFoldBinaryOpOperands(Opcode, C1, C2, DL))) {
629 bool IsNUW = hasNoUnsignedWrap(I) &&
630 hasNoUnsignedWrap(*Op0) &&
631 hasNoUnsignedWrap(*Op1);
632 BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
633 BinaryOperator::CreateNUW(Opcode, A, B) :
634 BinaryOperator::Create(Opcode, A, B);
635
636 if (isa<FPMathOperator>(NewBO)) {
637 FastMathFlags Flags = I.getFastMathFlags() &
638 Op0->getFastMathFlags() &
639 Op1->getFastMathFlags();
640 NewBO->setFastMathFlags(Flags);
641 }
642 InsertNewInstWith(NewBO, I.getIterator());
643 NewBO->takeName(Op1);
644 replaceOperand(I, 0, NewBO);
645 replaceOperand(I, 1, CRes);
646 // Conservatively clear the optional flags, since they may not be
647 // preserved by the reassociation.
649 if (IsNUW)
650 I.setHasNoUnsignedWrap(true);
651
652 Changed = true;
653 continue;
654 }
655 }
656
657 // No further simplifications.
658 return Changed;
659 } while (true);
660}
661
662/// Return whether "X LOp (Y ROp Z)" is always equal to
663/// "(X LOp Y) ROp (X LOp Z)".
666 // X & (Y | Z) <--> (X & Y) | (X & Z)
667 // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
668 if (LOp == Instruction::And)
669 return ROp == Instruction::Or || ROp == Instruction::Xor;
670
671 // X | (Y & Z) <--> (X | Y) & (X | Z)
672 if (LOp == Instruction::Or)
673 return ROp == Instruction::And;
674
675 // X * (Y + Z) <--> (X * Y) + (X * Z)
676 // X * (Y - Z) <--> (X * Y) - (X * Z)
677 if (LOp == Instruction::Mul)
678 return ROp == Instruction::Add || ROp == Instruction::Sub;
679
680 return false;
681}
682
683/// Return whether "(X LOp Y) ROp Z" is always equal to
684/// "(X ROp Z) LOp (Y ROp Z)".
688 return leftDistributesOverRight(ROp, LOp);
689
690 // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
692
693 // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
694 // but this requires knowing that the addition does not overflow and other
695 // such subtleties.
696}
697
698/// This function returns the identity value for the given opcode, which can be
699/// used to factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
701 if (isa<Constant>(V))
702 return nullptr;
703
704 return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
705}
706
707/// This function predicates factorization using distributive laws. By default,
708/// it just returns the 'Op' inputs. But for special-cases like
709/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
710/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
711/// allow more factorization opportunities.
714 Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {
715 assert(Op && "Expected a binary operator");
716 LHS = Op->getOperand(0);
717 RHS = Op->getOperand(1);
718 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
719 Constant *C;
720 if (match(Op, m_Shl(m_Value(), m_ImmConstant(C)))) {
721 // X << C --> X * (1 << C)
723 Instruction::Shl, ConstantInt::get(Op->getType(), 1), C);
724 assert(RHS && "Constant folding of immediate constants failed");
725 return Instruction::Mul;
726 }
727 // TODO: We can add other conversions e.g. shr => div etc.
728 }
729 if (Instruction::isBitwiseLogicOp(TopOpcode)) {
730 if (OtherOp && OtherOp->getOpcode() == Instruction::AShr &&
732 // lshr nneg C, X --> ashr nneg C, X
733 return Instruction::AShr;
734 }
735 }
736 return Op->getOpcode();
737}
738
739/// This tries to simplify binary operations by factorizing out common terms
740/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
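/// For example (illustrative):
///   %ab = mul i32 %a, %b
///   %ac = mul i32 %a, %c
///   %r  = add i32 %ab, %ac
/// can be factorized into:
///   %bc = add i32 %b, %c
///   %r  = mul i32 %a, %bc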
743 Instruction::BinaryOps InnerOpcode, Value *A,
744 Value *B, Value *C, Value *D) {
745 assert(A && B && C && D && "All values must be provided");
746
747 Value *V = nullptr;
748 Value *RetVal = nullptr;
749 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
750 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
751
752 // Does "X op' Y" always equal "Y op' X"?
753 bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
754
755 // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
756 if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode)) {
757 // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
758 // commutative case, "(A op' B) op (C op' A)"?
759 if (A == C || (InnerCommutative && A == D)) {
760 if (A != C)
761 std::swap(C, D);
762 // Consider forming "A op' (B op D)".
763 // If "B op D" simplifies then it can be formed with no cost.
764 V = simplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
765
766 // If "B op D" doesn't simplify then only go on if one of the existing
767 // operations "A op' B" and "C op' D" will be zapped as no longer used.
768 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
769 V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
770 if (V)
771 RetVal = Builder.CreateBinOp(InnerOpcode, A, V);
772 }
773 }
774
775 // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
776 if (!RetVal && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode)) {
777 // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
778 // commutative case, "(A op' B) op (B op' D)"?
779 if (B == D || (InnerCommutative && B == C)) {
780 if (B != D)
781 std::swap(C, D);
782 // Consider forming "(A op C) op' B".
783 // If "A op C" simplifies then it can be formed with no cost.
784 V = simplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
785
786 // If "A op C" doesn't simplify then only go on if one of the existing
787 // operations "A op' B" and "C op' D" will be zapped as no longer used.
788 if (!V && (LHS->hasOneUse() || RHS->hasOneUse()))
789 V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
790 if (V)
791 RetVal = Builder.CreateBinOp(InnerOpcode, V, B);
792 }
793 }
794
795 if (!RetVal)
796 return nullptr;
797
798 ++NumFactor;
799 RetVal->takeName(&I);
800
801 // Try to add no-overflow flags to the final value.
802 if (isa<BinaryOperator>(RetVal)) {
803 bool HasNSW = false;
804 bool HasNUW = false;
806 HasNSW = I.hasNoSignedWrap();
807 HasNUW = I.hasNoUnsignedWrap();
808 }
809 if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
810 HasNSW &= LOBO->hasNoSignedWrap();
811 HasNUW &= LOBO->hasNoUnsignedWrap();
812 }
813
814 if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
815 HasNSW &= ROBO->hasNoSignedWrap();
816 HasNUW &= ROBO->hasNoUnsignedWrap();
817 }
818
819 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
820 // We can propagate 'nsw' if we know that
821 // %Y = mul nsw i16 %X, C
822 // %Z = add nsw i16 %Y, %X
823 // =>
824 // %Z = mul nsw i16 %X, C+1
825 //
826 // iff C+1 isn't INT_MIN
827 const APInt *CInt;
828 if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
829 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
830
831 // nuw can be propagated with any constant or nuw value.
832 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
833 }
834 }
835 return RetVal;
836}
837
838// If `I` has one Const operand and the other matches `(ctpop (not x))`,
839// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
840// This is only useful if the new subtract can fold, so we only handle the
841// following cases:
842// 1) (add/sub/disjoint_or C, (ctpop (not x))
843// -> (add/sub/disjoint_or C', (ctpop x))
844// 2) (cmp pred C, (ctpop (not x))
845// -> (cmp pred C', (ctpop x))
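// For example (illustrative), for an i32 %x:
//   %n = xor i32 %x, -1
//   %p = call i32 @llvm.ctpop.i32(i32 %n)
//   %r = sub i32 10, %p
// becomes:
//   %p2 = call i32 @llvm.ctpop.i32(i32 %x)
//   %r  = add i32 %p2, -22      ; 10 - (32 - ctpop(x)) == ctpop(x) - 22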
847 unsigned Opc = I->getOpcode();
848 unsigned ConstIdx = 1;
849 switch (Opc) {
850 default:
851 return nullptr;
852 // (ctpop (not x)) <-> (sub nuw nsw BitWidth(x) - (ctpop x))
853 // We can fold the BitWidth(x) with add/sub/icmp as long as the other operand
854 // is constant.
855 case Instruction::Sub:
856 ConstIdx = 0;
857 break;
858 case Instruction::ICmp:
859 // Signed predicates aren't correct in some edge cases (like for i2 types).
860 // As well, since (ctpop x) is known to be in [0, BitWidth(x)], almost all
861 // signed comparisons against it are simplified to unsigned.
862 if (cast<ICmpInst>(I)->isSigned())
863 return nullptr;
864 break;
865 case Instruction::Or:
866 if (!match(I, m_DisjointOr(m_Value(), m_Value())))
867 return nullptr;
868 [[fallthrough]];
869 case Instruction::Add:
870 break;
871 }
872
873 Value *Op;
874 // Find ctpop.
875 if (!match(I->getOperand(1 - ConstIdx),
877 return nullptr;
878
879 Constant *C;
880 // Check other operand is ImmConstant.
881 if (!match(I->getOperand(ConstIdx), m_ImmConstant(C)))
882 return nullptr;
883
884 Type *Ty = Op->getType();
885 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
886 // Need extra check for icmp. Note if this check is true, it generally means
887 // the icmp will simplify to true/false.
888 if (Opc == Instruction::ICmp && !cast<ICmpInst>(I)->isEquality()) {
889 Constant *Cmp =
891 if (!Cmp || !Cmp->isZeroValue())
892 return nullptr;
893 }
894
895 // Check we can invert `(not x)` for free.
896 bool Consumes = false;
897 if (!isFreeToInvert(Op, Op->hasOneUse(), Consumes) || !Consumes)
898 return nullptr;
899 Value *NotOp = getFreelyInverted(Op, Op->hasOneUse(), &Builder);
900 assert(NotOp != nullptr &&
901 "Desync between isFreeToInvert and getFreelyInverted");
902
903 Value *CtpopOfNotOp = Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
904
905 Value *R = nullptr;
906
907 // Do the transformation here to avoid potentially introducing an infinite
908 // loop.
909 switch (Opc) {
910 case Instruction::Sub:
911 R = Builder.CreateAdd(CtpopOfNotOp, ConstantExpr::getSub(C, BitWidthC));
912 break;
913 case Instruction::Or:
914 case Instruction::Add:
915 R = Builder.CreateSub(ConstantExpr::getAdd(C, BitWidthC), CtpopOfNotOp);
916 break;
917 case Instruction::ICmp:
918 R = Builder.CreateICmp(cast<ICmpInst>(I)->getSwappedPredicate(),
919 CtpopOfNotOp, ConstantExpr::getSub(BitWidthC, C));
920 break;
921 default:
922 llvm_unreachable("Unhandled Opcode");
923 }
924 assert(R != nullptr);
925 return replaceInstUsesWith(*I, R);
926}
927
928// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
929// IFF
930// 1) the logic_shifts match
931// 2) either both binops are binops and one is `and` or
932// BinOp1 is `and`
933// (logic_shift (inv_logic_shift C1, C), C) == C1 or
934//
935// -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
936//
937// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
938// IFF
939// 1) the logic_shifts match
940// 2) BinOp1 == BinOp2 (if BinOp == `add`, then also requires `shl`).
941//
942// -> (BinOp (logic_shift (BinOp X, Y)), Mask)
943//
944// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
945// IFF
946// 1) Binop1 is bitwise logical operator `and`, `or` or `xor`
947// 2) Binop2 is `not`
948//
949// -> (arithmetic_shift Binop1((not X), Y), Amt)
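// For example (illustrative), with matching 'or' binops and 'lshr' shifts:
//   %xs = lshr i32 %x, 4
//   %a  = or i32 %xs, 3
//   %ys = lshr i32 %y, 4
//   %r  = or i32 %a, %ys
// becomes:
//   %xy = or i32 %x, %y
//   %s  = lshr i32 %xy, 4
//   %r  = or i32 %s, 3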
950
952 const DataLayout &DL = I.getDataLayout();
953 auto IsValidBinOpc = [](unsigned Opc) {
954 switch (Opc) {
955 default:
956 return false;
957 case Instruction::And:
958 case Instruction::Or:
959 case Instruction::Xor:
960 case Instruction::Add:
961 // Skip Sub as we only match constant masks which will canonicalize to use
962 // add.
963 return true;
964 }
965 };
966
967 // Check if we can distribute binop arbitrarily. `add` + `lshr` has extra
968 // constraints.
969 auto IsCompletelyDistributable = [](unsigned BinOpc1, unsigned BinOpc2,
970 unsigned ShOpc) {
971 assert(ShOpc != Instruction::AShr);
972 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
973 ShOpc == Instruction::Shl;
974 };
975
976 auto GetInvShift = [](unsigned ShOpc) {
977 assert(ShOpc != Instruction::AShr);
978 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
979 };
980
981 auto CanDistributeBinops = [&](unsigned BinOpc1, unsigned BinOpc2,
982 unsigned ShOpc, Constant *CMask,
983 Constant *CShift) {
984 // If the BinOp1 is `and` we don't need to check the mask.
985 if (BinOpc1 == Instruction::And)
986 return true;
987
988 // For all other possible transforms we need a completely distributable
989 // binop/shift (anything but `add` + `lshr`).
990 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
991 return false;
992
993 // If BinOp2 is `and`, any mask works (this only really helps for non-splat
994 // vecs, otherwise the mask will be simplified and the following check will
995 // handle it).
996 if (BinOpc2 == Instruction::And)
997 return true;
998
999 // Otherwise, need mask that meets the below requirement.
1000 // (logic_shift (inv_logic_shift Mask, ShAmt), ShAmt) == Mask
1001 Constant *MaskInvShift =
1002 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1003 return ConstantFoldBinaryOpOperands(ShOpc, MaskInvShift, CShift, DL) ==
1004 CMask;
1005 };
1006
1007 auto MatchBinOp = [&](unsigned ShOpnum) -> Instruction * {
1008 Constant *CMask, *CShift;
1009 Value *X, *Y, *ShiftedX, *Mask, *Shift;
1010 if (!match(I.getOperand(ShOpnum),
1011 m_OneUse(m_Shift(m_Value(Y), m_Value(Shift)))))
1012 return nullptr;
1013 if (!match(I.getOperand(1 - ShOpnum),
1015 m_OneUse(m_Shift(m_Value(X), m_Specific(Shift))),
1016 m_Value(ShiftedX)),
1017 m_Value(Mask))))
1018 return nullptr;
1019 // Make sure we are matching instruction shifts and not ConstantExpr
1020 auto *IY = dyn_cast<Instruction>(I.getOperand(ShOpnum));
1021 auto *IX = dyn_cast<Instruction>(ShiftedX);
1022 if (!IY || !IX)
1023 return nullptr;
1024
1025 // LHS and RHS need same shift opcode
1026 unsigned ShOpc = IY->getOpcode();
1027 if (ShOpc != IX->getOpcode())
1028 return nullptr;
1029
1030 // Make sure binop is real instruction and not ConstantExpr
1031 auto *BO2 = dyn_cast<Instruction>(I.getOperand(1 - ShOpnum));
1032 if (!BO2)
1033 return nullptr;
1034
1035 unsigned BinOpc = BO2->getOpcode();
1036 // Make sure we have valid binops.
1037 if (!IsValidBinOpc(I.getOpcode()) || !IsValidBinOpc(BinOpc))
1038 return nullptr;
1039
1040 if (ShOpc == Instruction::AShr) {
1041 if (Instruction::isBitwiseLogicOp(I.getOpcode()) &&
1042 BinOpc == Instruction::Xor && match(Mask, m_AllOnes())) {
1043 Value *NotX = Builder.CreateNot(X);
1044 Value *NewBinOp = Builder.CreateBinOp(I.getOpcode(), Y, NotX);
1046 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp, Shift);
1047 }
1048
1049 return nullptr;
1050 }
1051
1052 // If BinOp1 == BinOp2 and it's bitwise or shl with add, then just
1053 // distribute to drop the shift, regardless of the constants.
1054 if (BinOpc == I.getOpcode() &&
1055 IsCompletelyDistributable(I.getOpcode(), BinOpc, ShOpc)) {
1056 Value *NewBinOp2 = Builder.CreateBinOp(I.getOpcode(), X, Y);
1057 Value *NewBinOp1 = Builder.CreateBinOp(
1058 static_cast<Instruction::BinaryOps>(ShOpc), NewBinOp2, Shift);
1059 return BinaryOperator::Create(I.getOpcode(), NewBinOp1, Mask);
1060 }
1061
1062 // Otherwise we can only distribute by constant shifting the mask, so
1063 // ensure we have constants.
1064 if (!match(Shift, m_ImmConstant(CShift)))
1065 return nullptr;
1066 if (!match(Mask, m_ImmConstant(CMask)))
1067 return nullptr;
1068
1069 // Check if we can distribute the binops.
1070 if (!CanDistributeBinops(I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1071 return nullptr;
1072
1073 Constant *NewCMask =
1074 ConstantFoldBinaryOpOperands(GetInvShift(ShOpc), CMask, CShift, DL);
1075 Value *NewBinOp2 = Builder.CreateBinOp(
1076 static_cast<Instruction::BinaryOps>(BinOpc), X, NewCMask);
1077 Value *NewBinOp1 = Builder.CreateBinOp(I.getOpcode(), Y, NewBinOp2);
1078 return BinaryOperator::Create(static_cast<Instruction::BinaryOps>(ShOpc),
1079 NewBinOp1, CShift);
1080 };
1081
1082 if (Instruction *R = MatchBinOp(0))
1083 return R;
1084 return MatchBinOp(1);
1085}
1086
1087// (Binop (zext C), (select C, T, F))
1088// -> (select C, (binop 1, T), (binop 0, F))
1089//
1090// (Binop (sext C), (select C, T, F))
1091// -> (select C, (binop -1, T), (binop 0, F))
1092//
1093// Attempt to simplify binary operations into a select with folded args, when
1094// one operand of the binop is a select instruction and the other operand is a
1095// zext/sext extension, whose value is the select condition.
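// For example (illustrative):
//   %z = zext i1 %c to i32
//   %s = select i1 %c, i32 %t, i32 %f
//   %r = add i32 %z, %s
// becomes:
//   %t1 = add i32 1, %t
//   %f0 = add i32 0, %f
//   %r  = select i1 %c, i32 %t1, i32 %f0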
1098 // TODO: this simplification may be extended to any speculatable instruction,
1099 // not just binops, and would possibly be handled better in FoldOpIntoSelect.
1100 Instruction::BinaryOps Opc = I.getOpcode();
1101 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1102 Value *A, *CondVal, *TrueVal, *FalseVal;
1103 Value *CastOp;
1104
1105 auto MatchSelectAndCast = [&](Value *CastOp, Value *SelectOp) {
1106 return match(CastOp, m_ZExtOrSExt(m_Value(A))) &&
1107 A->getType()->getScalarSizeInBits() == 1 &&
1108 match(SelectOp, m_Select(m_Value(CondVal), m_Value(TrueVal),
1109 m_Value(FalseVal)));
1110 };
1111
1112 // Make sure one side of the binop is a select instruction, and the other is a
1113 // zero/sign extension operating on an i1.
1114 if (MatchSelectAndCast(LHS, RHS))
1115 CastOp = LHS;
1116 else if (MatchSelectAndCast(RHS, LHS))
1117 CastOp = RHS;
1118 else
1119 return nullptr;
1120
1121 auto NewFoldedConst = [&](bool IsTrueArm, Value *V) {
1122 bool IsCastOpRHS = (CastOp == RHS);
1123 bool IsZExt = isa<ZExtInst>(CastOp);
1124 Constant *C;
1125
1126 if (IsTrueArm) {
1127 C = Constant::getNullValue(V->getType());
1128 } else if (IsZExt) {
1129 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1130 C = Constant::getIntegerValue(V->getType(), APInt(BitWidth, 1));
1131 } else {
1132 C = Constant::getAllOnesValue(V->getType());
1133 }
1134
1135 return IsCastOpRHS ? Builder.CreateBinOp(Opc, V, C)
1136 : Builder.CreateBinOp(Opc, C, V);
1137 };
1138
1139 // If the value used in the zext/sext is the select condition, or the negation
1140 // of the select condition, the binop can be simplified.
1141 if (CondVal == A) {
1142 Value *NewTrueVal = NewFoldedConst(false, TrueVal);
1143 return SelectInst::Create(CondVal, NewTrueVal,
1144 NewFoldedConst(true, FalseVal));
1145 }
1146
1147 if (match(A, m_Not(m_Specific(CondVal)))) {
1148 Value *NewTrueVal = NewFoldedConst(true, TrueVal);
1149 return SelectInst::Create(CondVal, NewTrueVal,
1150 NewFoldedConst(false, FalseVal));
1151 }
1152
1153 return nullptr;
1154}
1155
1157 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1160 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1161 Value *A, *B, *C, *D;
1162 Instruction::BinaryOps LHSOpcode, RHSOpcode;
1163
1164 if (Op0)
1165 LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B, Op1);
1166 if (Op1)
1167 RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D, Op0);
1168
1169 // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
1170 // a common term.
1171 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1172 if (Value *V = tryFactorization(I, SQ, Builder, LHSOpcode, A, B, C, D))
1173 return V;
1174
1175 // The instruction has the form "(A op' B) op (C)". Try to factorize common
1176 // term.
1177 if (Op0)
1178 if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
1179 if (Value *V =
1180 tryFactorization(I, SQ, Builder, LHSOpcode, A, B, RHS, Ident))
1181 return V;
1182
1183 // The instruction has the form "(B) op (C op' D)". Try to factorize common
1184 // term.
1185 if (Op1)
1186 if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
1187 if (Value *V =
1188 tryFactorization(I, SQ, Builder, RHSOpcode, LHS, Ident, C, D))
1189 return V;
1190
1191 return nullptr;
1192}
1193
1194/// This tries to simplify binary operations which some other binary operation
1195/// distributes over, either by factorizing out common terms
1196/// (e.g. "(A*B)+(A*C)" -> "A*(B+C)") or by expanding out if this results in
1197/// simplifications (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win).
1198/// Returns the simplified value, or null if it didn't simplify.
1200 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1203 Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
1204
1205 // Factorization.
1206 if (Value *R = tryFactorizationFolds(I))
1207 return R;
1208
1209 // Expansion.
1210 if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
1211 // The instruction has the form "(A op' B) op C". See if expanding it out
1212 // to "(A op C) op' (B op C)" results in simplifications.
1213 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
1214 Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
1215
1216 // Disable the use of undef because it's not safe to distribute undef.
1217 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1218 Value *L = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1219 Value *R = simplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
1220
1221 // Do "A op C" and "B op C" both simplify?
1222 if (L && R) {
1223 // They do! Return "L op' R".
1224 ++NumExpand;
1225 C = Builder.CreateBinOp(InnerOpcode, L, R);
1226 C->takeName(&I);
1227 return C;
1228 }
1229
1230 // Does "A op C" simplify to the identity value for the inner opcode?
1231 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1232 // They do! Return "B op C".
1233 ++NumExpand;
1234 C = Builder.CreateBinOp(TopLevelOpcode, B, C);
1235 C->takeName(&I);
1236 return C;
1237 }
1238
1239 // Does "B op C" simplify to the identity value for the inner opcode?
1240 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1241 // They do! Return "A op C".
1242 ++NumExpand;
1243 C = Builder.CreateBinOp(TopLevelOpcode, A, C);
1244 C->takeName(&I);
1245 return C;
1246 }
1247 }
1248
1249 if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
1250 // The instruction has the form "A op (B op' C)". See if expanding it out
1251 // to "(A op B) op' (A op C)" results in simplifications.
1252 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
1253 Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
1254
1255 // Disable the use of undef because it's not safe to distribute undef.
1256 auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
1257 Value *L = simplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
1258 Value *R = simplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
1259
1260 // Do "A op B" and "A op C" both simplify?
1261 if (L && R) {
1262 // They do! Return "L op' R".
1263 ++NumExpand;
1264 A = Builder.CreateBinOp(InnerOpcode, L, R);
1265 A->takeName(&I);
1266 return A;
1267 }
1268
1269 // Does "A op B" simplify to the identity value for the inner opcode?
1270 if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
1271 // They do! Return "A op C".
1272 ++NumExpand;
1273 A = Builder.CreateBinOp(TopLevelOpcode, A, C);
1274 A->takeName(&I);
1275 return A;
1276 }
1277
1278 // Does "A op C" simplify to the identity value for the inner opcode?
1279 if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
1280 // They do! Return "A op B".
1281 ++NumExpand;
1282 A = Builder.CreateBinOp(TopLevelOpcode, A, B);
1283 A->takeName(&I);
1284 return A;
1285 }
1286 }
1287
1288 return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
1289}
1290
1291static std::optional<std::pair<Value *, Value *>>
1293 if (LHS->getParent() != RHS->getParent())
1294 return std::nullopt;
1295
1296 if (LHS->getNumIncomingValues() < 2)
1297 return std::nullopt;
1298
1299 if (!equal(LHS->blocks(), RHS->blocks()))
1300 return std::nullopt;
1301
1302 Value *L0 = LHS->getIncomingValue(0);
1303 Value *R0 = RHS->getIncomingValue(0);
1304
1305 for (unsigned I = 1, E = LHS->getNumIncomingValues(); I != E; ++I) {
1306 Value *L1 = LHS->getIncomingValue(I);
1307 Value *R1 = RHS->getIncomingValue(I);
1308
1309 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1310 continue;
1311
1312 return std::nullopt;
1313 }
1314
1315 return std::optional(std::pair(L0, R0));
1316}
1317
1318std::optional<std::pair<Value *, Value *>>
1319InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {
1322 if (!LHSInst || !RHSInst || LHSInst->getOpcode() != RHSInst->getOpcode())
1323 return std::nullopt;
1324 switch (LHSInst->getOpcode()) {
1325 case Instruction::PHI:
1327 case Instruction::Select: {
1328 Value *Cond = LHSInst->getOperand(0);
1329 Value *TrueVal = LHSInst->getOperand(1);
1330 Value *FalseVal = LHSInst->getOperand(2);
1331 if (Cond == RHSInst->getOperand(0) && TrueVal == RHSInst->getOperand(2) &&
1332 FalseVal == RHSInst->getOperand(1))
1333 return std::pair(TrueVal, FalseVal);
1334 return std::nullopt;
1335 }
1336 case Instruction::Call: {
1337 // Match min(a, b) and max(a, b)
1338 MinMaxIntrinsic *LHSMinMax = dyn_cast<MinMaxIntrinsic>(LHSInst);
1339 MinMaxIntrinsic *RHSMinMax = dyn_cast<MinMaxIntrinsic>(RHSInst);
1340 if (LHSMinMax && RHSMinMax &&
1341 LHSMinMax->getPredicate() ==
1343 ((LHSMinMax->getLHS() == RHSMinMax->getLHS() &&
1344 LHSMinMax->getRHS() == RHSMinMax->getRHS()) ||
1345 (LHSMinMax->getLHS() == RHSMinMax->getRHS() &&
1346 LHSMinMax->getRHS() == RHSMinMax->getLHS())))
1347 return std::pair(LHSMinMax->getLHS(), LHSMinMax->getRHS());
1348 return std::nullopt;
1349 }
1350 default:
1351 return std::nullopt;
1352 }
1353}
1354
1356 Value *LHS,
1357 Value *RHS) {
1358 Value *A, *B, *C, *D, *E, *F;
1359 bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
1360 bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
1361 if (!LHSIsSelect && !RHSIsSelect)
1362 return nullptr;
1363
1364 FastMathFlags FMF;
1366 if (isa<FPMathOperator>(&I)) {
1367 FMF = I.getFastMathFlags();
1368 Builder.setFastMathFlags(FMF);
1369 }
1370
1371 Instruction::BinaryOps Opcode = I.getOpcode();
1372 SimplifyQuery Q = SQ.getWithInstruction(&I);
1373
1374 Value *Cond, *True = nullptr, *False = nullptr;
1375
1376 // Special-case for add/negate combination. Replace the zero in the negation
1377 // with the trailing add operand:
1378 // (Cond ? TVal : -N) + Z --> Cond ? True : (Z - N)
1379 // (Cond ? -N : FVal) + Z --> Cond ? (Z - N) : False
1380 auto foldAddNegate = [&](Value *TVal, Value *FVal, Value *Z) -> Value * {
1381 // We need an 'add' and exactly 1 arm of the select to have been simplified.
1382 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1383 return nullptr;
1384
1385 Value *N;
1386 if (True && match(FVal, m_Neg(m_Value(N)))) {
1387 Value *Sub = Builder.CreateSub(Z, N);
1388 return Builder.CreateSelect(Cond, True, Sub, I.getName());
1389 }
1390 if (False && match(TVal, m_Neg(m_Value(N)))) {
1391 Value *Sub = Builder.CreateSub(Z, N);
1392 return Builder.CreateSelect(Cond, Sub, False, I.getName());
1393 }
1394 return nullptr;
1395 };
1396
1397 if (LHSIsSelect && RHSIsSelect && A == D) {
1398 // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
1399 Cond = A;
1400 True = simplifyBinOp(Opcode, B, E, FMF, Q);
1401 False = simplifyBinOp(Opcode, C, F, FMF, Q);
1402
1403 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1404 if (False && !True)
1405 True = Builder.CreateBinOp(Opcode, B, E);
1406 else if (True && !False)
1407 False = Builder.CreateBinOp(Opcode, C, F);
1408 }
1409 } else if (LHSIsSelect && LHS->hasOneUse()) {
1410 // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
1411 Cond = A;
1412 True = simplifyBinOp(Opcode, B, RHS, FMF, Q);
1413 False = simplifyBinOp(Opcode, C, RHS, FMF, Q);
1414 if (Value *NewSel = foldAddNegate(B, C, RHS))
1415 return NewSel;
1416 } else if (RHSIsSelect && RHS->hasOneUse()) {
1417 // X op (D ? E : F) -> D ? (X op E) : (X op F)
1418 Cond = D;
1419 True = simplifyBinOp(Opcode, LHS, E, FMF, Q);
1420 False = simplifyBinOp(Opcode, LHS, F, FMF, Q);
1421 if (Value *NewSel = foldAddNegate(E, F, LHS))
1422 return NewSel;
1423 }
1424
1425 if (!True || !False)
1426 return nullptr;
1427
1428 Value *SI = Builder.CreateSelect(Cond, True, False);
1429 SI->takeName(&I);
1430 return SI;
1431}
1432
1433/// Freely adapt every user of V as if V was changed to !V.
1434/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
1436 assert(!isa<Constant>(I) && "Shouldn't invert users of constant");
1437 for (User *U : make_early_inc_range(I->users())) {
1438 if (U == IgnoredUser)
1439 continue; // Don't consider this user.
1440 switch (cast<Instruction>(U)->getOpcode()) {
1441 case Instruction::Select: {
1442 auto *SI = cast<SelectInst>(U);
1443 SI->swapValues();
1444 SI->swapProfMetadata();
1445 break;
1446 }
1447 case Instruction::Br: {
1449 BI->swapSuccessors(); // swaps prof metadata too
1450 if (BPI)
1451 BPI->swapSuccEdgesProbabilities(BI->getParent());
1452 break;
1453 }
1454 case Instruction::Xor:
1456 // Add to worklist for DCE.
1458 break;
1459 default:
1460 llvm_unreachable("Got unexpected user - out of sync with "
1461 "canFreelyInvertAllUsersOf() ?");
1462 }
1463 }
1464
1465 // Update pre-existing debug value uses.
1466 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1467 llvm::findDbgValues(I, DbgVariableRecords);
1468
1469 for (DbgVariableRecord *DbgVal : DbgVariableRecords) {
1470 SmallVector<uint64_t, 1> Ops = {dwarf::DW_OP_not};
1471 for (unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1472 Idx != End; ++Idx)
1473 if (DbgVal->getVariableLocationOp(Idx) == I)
1474 DbgVal->setExpression(
1475 DIExpression::appendOpsToArg(DbgVal->getExpression(), Ops, Idx));
1476 }
1477}
1478
1479/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
1480/// constant zero (which is the 'negate' form).
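/// For example (illustrative), this returns %x for "%n = sub i32 0, %x", and the
/// constant -7 for the constant operand 7.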
1481Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
1482 Value *NegV;
1483 if (match(V, m_Neg(m_Value(NegV))))
1484 return NegV;
1485
1486 // Constants can be considered to be negated values if they can be folded.
1488 return ConstantExpr::getNeg(C);
1489
1491 if (C->getType()->getElementType()->isIntegerTy())
1492 return ConstantExpr::getNeg(C);
1493
1495 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1496 Constant *Elt = CV->getAggregateElement(i);
1497 if (!Elt)
1498 return nullptr;
1499
1500 if (isa<UndefValue>(Elt))
1501 continue;
1502
1503 if (!isa<ConstantInt>(Elt))
1504 return nullptr;
1505 }
1506 return ConstantExpr::getNeg(CV);
1507 }
1508
1509 // Negate integer vector splats.
1510 if (auto *CV = dyn_cast<Constant>(V))
1511 if (CV->getType()->isVectorTy() &&
1512 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1513 return ConstantExpr::getNeg(CV);
1514
1515 return nullptr;
1516}
1517
1518// Try to fold:
1519// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1520// -> ({s|u}itofp (int_binop x, y))
1521// 2) (fp_binop ({s|u}itofp x), FpC)
1522// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1523//
1524// Assuming the sign of the cast for x/y is `OpsFromSigned`.
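// For example (illustrative), when the casts are exact and the integer add is
// known not to overflow:
//   %xf = sitofp i16 %x to float
//   %yf = sitofp i16 %y to float
//   %r  = fadd float %xf, %yf
// becomes:
//   %s = add nsw i16 %x, %y
//   %r = sitofp i16 %s to float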
1525Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1526 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
1528
1529 Type *FPTy = BO.getType();
1530 Type *IntTy = IntOps[0]->getType();
1531
1532 unsigned IntSz = IntTy->getScalarSizeInBits();
1533 // This is the maximum number of in-use bits in the integer for which the
1534 // int -> fp casts are exact.
1535 unsigned MaxRepresentableBits =
1537
1538 // Preserve the known number of leading bits. This can allow us to trivially
1539 // satisfy the nsw/nuw checks later on.
1540 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1541
1542 // NB: This only comes up if OpsFromSigned is true, so there is no need to
1543 // cache it between calls to `foldFBinOpOfIntCastsFromSign`.
1544 auto IsNonZero = [&](unsigned OpNo) -> bool {
1545 if (OpsKnown[OpNo].hasKnownBits() &&
1546 OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
1547 return true;
1548 return isKnownNonZero(IntOps[OpNo], SQ);
1549 };
1550
1551 auto IsNonNeg = [&](unsigned OpNo) -> bool {
1552 // NB: This matches the impl in ValueTracking, we just try to use cached
1553 // knownbits here. If we ever start supporting WithCache for
1554 // `isKnownNonNegative`, change this to an explicit call.
1555 return OpsKnown[OpNo].getKnownBits(SQ).isNonNegative();
1556 };
1557
1558 // Check if we know for certain that ({s|u}itofp op) is exact.
1559 auto IsValidPromotion = [&](unsigned OpNo) -> bool {
1560 // Can we treat this operand as the desired sign?
1561 if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(OpNo)) &&
1562 !IsNonNeg(OpNo))
1563 return false;
1564
1565 // If fp precision >= bitwidth(op) then it's exact.
1566 // NB: This is slightly conservative for `sitofp`. For signed conversion, we
1567 // can handle `MaxRepresentableBits == IntSz - 1` as the sign bit will be
1568 // handled specially. We can't, however, increase the bound arbitrarily for
1569 // `sitofp` as for larger sizes, it won't sign extend.
1570 if (MaxRepresentableBits < IntSz) {
1571 // Otherwise, if it's a signed cast, check that fp precision >= bitwidth(op) -
1572 // numSignBits(op).
1573 // TODO: If we add support for `WithCache` in `ComputeNumSignBits`, change
1574 // `IntOps[OpNo]` arguments to `KnownOps[OpNo]`.
1575 if (OpsFromSigned)
1576 NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
1577 // Finally for unsigned check that fp precision >= bitwidth(op) -
1578 // numLeadingZeros(op).
1579 else {
1580 NumUsedLeadingBits[OpNo] =
1581 IntSz - OpsKnown[OpNo].getKnownBits(SQ).countMinLeadingZeros();
1582 }
1583 }
1584 // NB: We could also check if op is known to be a power of 2 or zero (which
1585 // will always be representable). It's unlikely, however, that if we are
1586 // unable to bound op in any way, we will be able to pass the overflow checks
1587 // later on.
1588
1589 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1590 return false;
1591 // Signed + Mul also requires that op is non-zero to avoid -0 cases.
1592 return !OpsFromSigned || BO.getOpcode() != Instruction::FMul ||
1593 IsNonZero(OpNo);
1594 };
1595
1596 // If we have a constant rhs, see if we can losslessly convert it to an int.
1597 if (Op1FpC != nullptr) {
1598 // Signed + Mul req non-zero
1599 if (OpsFromSigned && BO.getOpcode() == Instruction::FMul &&
1600 !match(Op1FpC, m_NonZeroFP()))
1601 return nullptr;
1602
1604 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1605 IntTy, DL);
1606 if (Op1IntC == nullptr)
1607 return nullptr;
1608 if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
1609 : Instruction::UIToFP,
1610 Op1IntC, FPTy, DL) != Op1FpC)
1611 return nullptr;
1612
1613 // First try to keep sign of cast the same.
1614 IntOps[1] = Op1IntC;
1615 }
1616
1617 // Ensure lhs/rhs integer types match.
1618 if (IntTy != IntOps[1]->getType())
1619 return nullptr;
1620
1621 if (Op1FpC == nullptr) {
1622 if (!IsValidPromotion(1))
1623 return nullptr;
1624 }
1625 if (!IsValidPromotion(0))
1626 return nullptr;
1627
1628 // Finally, we check that the integer version of the binop will not overflow.
1630 // Because of the precision check, we can often rule out overflows.
1631 bool NeedsOverflowCheck = true;
1632 // Try to conservatively rule out overflow based on the precision checks
1633 // already done.
1634 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1635 unsigned OverflowMaxCurBits =
1636 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1637 bool OutputSigned = OpsFromSigned;
1638 switch (BO.getOpcode()) {
1639 case Instruction::FAdd:
1640 IntOpc = Instruction::Add;
1641 OverflowMaxOutputBits += OverflowMaxCurBits;
1642 break;
1643 case Instruction::FSub:
1644 IntOpc = Instruction::Sub;
1645 OverflowMaxOutputBits += OverflowMaxCurBits;
1646 break;
1647 case Instruction::FMul:
1648 IntOpc = Instruction::Mul;
1649 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1650 break;
1651 default:
1652 llvm_unreachable("Unsupported binop");
1653 }
1654 // The precision check may have already ruled out overflow.
1655 if (OverflowMaxOutputBits < IntSz) {
1656 NeedsOverflowCheck = false;
1657 // We can bound the unsigned result of the sub to an in-range signed value
1658 // (this is what allows us to avoid the overflow check for sub).
1659 if (IntOpc == Instruction::Sub)
1660 OutputSigned = true;
1661 }
1662
1663 // Precision check did not rule out overflow, so need to check.
1664 // TODO: If we add support for `WithCache` in `willNotOverflow`, change
1665 // `IntOps[...]` arguments to `KnownOps[...]`.
1666 if (NeedsOverflowCheck &&
1667 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1668 return nullptr;
1669
1670 Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1671 if (auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1672 IntBO->setHasNoSignedWrap(OutputSigned);
1673 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1674 }
1675 if (OutputSigned)
1676 return new SIToFPInst(IntBinOp, FPTy);
1677 return new UIToFPInst(IntBinOp, FPTy);
1678}
1679
1680// Try to fold:
1681// 1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
1682// -> ({s|u}itofp (int_binop x, y))
1683// 2) (fp_binop ({s|u}itofp x), FpC)
1684// -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
1685Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
1686 std::array<Value *, 2> IntOps = {nullptr, nullptr};
1687 Constant *Op1FpC = nullptr;
1688 // Check for:
1689 // 1) (binop ({s|u}itofp x), ({s|u}itofp y))
1690 // 2) (binop ({s|u}itofp x), FpC)
1691 if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
1692 !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
1693 return nullptr;
1694
1695 if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
1696 !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
1697 !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
1698 return nullptr;
1699
1700 // Cache KnownBits a bit to potentially save some analysis.
1701 SmallVector<WithCache<const Value *>, 2> OpsKnown = {IntOps[0], IntOps[1]};
1702
1703 // Try treating x/y as coming from both `uitofp` and `sitofp`. There are
1704 // different constraints depending on the sign of the cast.
1705 // NB: `(uitofp nneg X)` == `(sitofp nneg X)`.
1706 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/false,
1707 IntOps, Op1FpC, OpsKnown))
1708 return R;
1709 return foldFBinOpOfIntCastsFromSign(BO, /*OpsFromSigned=*/true, IntOps,
1710 Op1FpC, OpsKnown);
1711}
1712
1713/// A binop with a constant operand and a sign-extended boolean operand may be
1714/// converted into a select of constants by applying the binary operation to
1715/// the constant with the two possible values of the extended boolean (0 or -1).
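/// For example (illustrative): "add (sext i1 %b to i32), 7" becomes
/// "select i1 %b, i32 6, i32 7", since folding the add with the two possible
/// extended values gives -1 + 7 = 6 and 0 + 7 = 7.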
1716Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
1717 // TODO: Handle non-commutative binop (constant is operand 0).
1718 // TODO: Handle zext.
1719 // TODO: Peek through 'not' of cast.
1720 Value *BO0 = BO.getOperand(0);
1721 Value *BO1 = BO.getOperand(1);
1722 Value *X;
1723 Constant *C;
1724 if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
1725 !X->getType()->isIntOrIntVectorTy(1))
1726 return nullptr;
1727
1728 // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
1729 Constant *Ones = Constant::getAllOnesValue(BO.getType());
1730 Constant *Zero = Constant::getNullValue(BO.getType());
1731 Value *TVal = Builder.CreateBinOp(BO.getOpcode(), Ones, C);
1732 Value *FVal = Builder.CreateBinOp(BO.getOpcode(), Zero, C);
1733 return SelectInst::Create(X, TVal, FVal);
1734}
1735
1736 static Value *simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1737 bool IsTrueArm) {
1738 SmallVector<Value *> Ops;
1739 for (Value *Op : I.operands()) {
1740 Value *V = nullptr;
1741 if (Op == SI) {
1742 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1743 } else if (match(SI->getCondition(),
1744 m_SpecificICmp(IsTrueArm ? ICmpInst::ICMP_EQ
1745 : ICmpInst::ICMP_NE,
1746 m_Specific(Op), m_Value(V))) &&
1747 isGuaranteedNotToBeUndefOrPoison(V)) {
1748 // Pass
1749 } else {
1750 V = Op;
1751 }
1752 Ops.push_back(V);
1753 }
1754
1755 return simplifyInstructionWithOperands(&I, Ops, I.getDataLayout());
1756}
1757
1758 static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
1759 Value *NewOp, InstCombiner &IC) {
1760 Instruction *Clone = I.clone();
1761 Clone->replaceUsesOfWith(SI, NewOp);
1762 Clone->dropUBImplyingAttrsAndMetadata();
1763 IC.InsertNewInstBefore(Clone, I.getIterator());
1764 return Clone;
1765}
1766
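// Fold an operation whose operand is a select into a select of the folded
// results, e.g. (illustrative): "op (select %c, %t, %f), %x" becomes
// "select %c, (op %t, %x), (op %f, %x)" when at least one arm simplifies; the
// other arm is recreated as a clone of the original operation.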
1767 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1768 bool FoldWithMultiUse) {
1769 // Don't modify shared select instructions unless FoldWithMultiUse is set.
1770 if (!SI->hasOneUse() && !FoldWithMultiUse)
1771 return nullptr;
1772
1773 Value *TV = SI->getTrueValue();
1774 Value *FV = SI->getFalseValue();
1775
1776 // Bool selects with constant operands can be folded to logical ops.
1777 if (SI->getType()->isIntOrIntVectorTy(1))
1778 return nullptr;
1779
1780 // Avoid breaking min/max reduction pattern,
1781 // which is necessary for vectorization later.
1782 if (isa<MinMaxIntrinsic>(&Op))
1783 for (Value *IntrinOp : Op.operands())
1784 if (auto *PN = dyn_cast<PHINode>(IntrinOp))
1785 for (Value *PhiOp : PN->operands())
1786 if (PhiOp == &Op)
1787 return nullptr;
1788
1789 // Test if a FCmpInst instruction is used exclusively by a select as
1790 // part of a minimum or maximum operation. If so, refrain from doing
1791 // any other folding. This helps out other analyses which understand
1792 // non-obfuscated minimum and maximum idioms. And in this case, at
1793 // least one of the comparison operands has at least one user besides
1794 // the compare (the select), which would often largely negate the
1795 // benefit of folding anyway.
1796 if (auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1797 if (CI->hasOneUse()) {
1798 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1799 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1800 !CI->isCommutative())
1801 return nullptr;
1802 }
1803 }
1804
1805 // Make sure that one of the select arms folds successfully.
1806 Value *NewTV = simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/true);
1807 Value *NewFV =
1808 simplifyOperationIntoSelectOperand(Op, SI, /*IsTrueArm=*/false);
1809 if (!NewTV && !NewFV)
1810 return nullptr;
1811
1812 // Create an instruction for the arm that did not fold.
1813 if (!NewTV)
1814 NewTV = foldOperationIntoSelectOperand(Op, SI, TV, *this);
1815 if (!NewFV)
1816 NewFV = foldOperationIntoSelectOperand(Op, SI, FV, *this);
1817 return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1818}
1819
1820 static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
1821 Value *InValue, BasicBlock *InBB,
1822 const DataLayout &DL,
1823 const SimplifyQuery SQ) {
1824 // NB: It is a precondition of this transform that the operands be
1825 // phi translatable!
1826 SmallVector<Value *> Ops;
1827 for (Value *Op : I.operands()) {
1828 if (Op == PN)
1829 Ops.push_back(InValue);
1830 else
1831 Ops.push_back(Op->DoPHITranslation(PN->getParent(), InBB));
1832 }
1833
1834 // Don't consider the simplification successful if we get back a constant
1835 // expression. That's just an instruction in hiding.
1836 // Also reject the case where we simplify back to the phi node. We wouldn't
1837 // be able to remove it in that case.
1838 Value *NewVal = simplifyInstructionWithOperands(
1839 &I, Ops, SQ.getWithInstruction(InBB->getTerminator()));
1840 if (NewVal && NewVal != PN && !match(NewVal, m_ConstantExpr()))
1841 return NewVal;
1842
1843 // Check if incoming PHI value can be replaced with constant
1844 // based on implied condition.
1845 BranchInst *TerminatorBI = dyn_cast<BranchInst>(InBB->getTerminator());
1846 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I);
1847 if (TerminatorBI && TerminatorBI->isConditional() &&
1848 TerminatorBI->getSuccessor(0) != TerminatorBI->getSuccessor(1) && ICmp) {
1849 bool LHSIsTrue = TerminatorBI->getSuccessor(0) == PN->getParent();
1850 std::optional<bool> ImpliedCond = isImpliedCondition(
1851 TerminatorBI->getCondition(), ICmp->getCmpPredicate(), Ops[0], Ops[1],
1852 DL, LHSIsTrue);
1853 if (ImpliedCond)
1854 return ConstantInt::getBool(I.getType(), ImpliedCond.value());
1855 }
1856
1857 return nullptr;
1858}
1859
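// Fold an instruction that uses a phi node into the phi's predecessors, e.g.
// (illustrative):
//   %p = phi i32 [ 1, %bb0 ], [ %x, %bb1 ]
//   %r = add i32 %p, 4
// becomes
//   %r = phi i32 [ 5, %bb0 ], [ %x.add, %bb1 ]
// where %x.add is "add i32 %x, 4" cloned into %bb1, provided at most one
// incoming value fails to simplify and that predecessor branches
// unconditionally into the phi block.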
1860 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
1861 bool AllowMultipleUses) {
1862 unsigned NumPHIValues = PN->getNumIncomingValues();
1863 if (NumPHIValues == 0)
1864 return nullptr;
1865
1866 // We normally only transform phis with a single use. However, if a PHI has
1867 // multiple uses and they are all the same operation, we can fold *all* of the
1868 // uses into the PHI.
1869 bool OneUse = PN->hasOneUse();
1870 bool IdenticalUsers = false;
1871 if (!AllowMultipleUses && !OneUse) {
1872 // Walk the use list for the instruction, comparing them to I.
1873 for (User *U : PN->users()) {
1874 auto *UI = cast<Instruction>(U);
1875 if (UI != &I && !I.isIdenticalTo(UI))
1876 return nullptr;
1877 }
1878 // Otherwise, we can replace *all* users with the new PHI we form.
1879 IdenticalUsers = true;
1880 }
1881
1882 // Check that all operands are phi-translatable.
1883 for (Value *Op : I.operands()) {
1884 if (Op == PN)
1885 continue;
1886
1887 // Non-instructions never require phi-translation.
1888 auto *I = dyn_cast<Instruction>(Op);
1889 if (!I)
1890 continue;
1891
1892 // Phi-translate can handle phi nodes in the same block.
1893 if (isa<PHINode>(I))
1894 if (I->getParent() == PN->getParent())
1895 continue;
1896
1897 // Operand dominates the block, no phi-translation necessary.
1898 if (DT.dominates(I, PN->getParent()))
1899 continue;
1900
1901 // Not phi-translatable, bail out.
1902 return nullptr;
1903 }
1904
1905 // Check to see whether the instruction can be folded into each phi operand.
1906 // If there is one operand that does not fold, remember the BB it is in.
1907 SmallVector<Value *> NewPhiValues;
1908 SmallVector<unsigned int> OpsToMoveUseToIncomingBB;
1909 bool SeenNonSimplifiedInVal = false;
1910 for (unsigned i = 0; i != NumPHIValues; ++i) {
1911 Value *InVal = PN->getIncomingValue(i);
1912 BasicBlock *InBB = PN->getIncomingBlock(i);
1913
1914 if (auto *NewVal = simplifyInstructionWithPHI(I, PN, InVal, InBB, DL, SQ)) {
1915 NewPhiValues.push_back(NewVal);
1916 continue;
1917 }
1918
1919 // Handle some cases that can't be fully simplified, but where we know that
1920 // the two instructions will fold into one.
1921 auto WillFold = [&]() {
1922 if (!InVal->hasUseList() || !InVal->hasOneUser())
1923 return false;
1924
1925 // icmp of ucmp/scmp with constant will fold to icmp.
1926 const APInt *Ignored;
1927 if (isa<CmpIntrinsic>(InVal) &&
1928 match(&I, m_ICmp(m_Specific(PN), m_APInt(Ignored))))
1929 return true;
1930
1931 // icmp eq zext(bool), 0 will fold to !bool.
1932 if (isa<ZExtInst>(InVal) &&
1933 cast<ZExtInst>(InVal)->getSrcTy()->isIntOrIntVectorTy(1) &&
1934 match(&I,
1935 m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(PN), m_Zero())))
1936 return true;
1937
1938 return false;
1939 };
1940
1941 if (WillFold()) {
1942 OpsToMoveUseToIncomingBB.push_back(i);
1943 NewPhiValues.push_back(nullptr);
1944 continue;
1945 }
1946
1947 if (!OneUse && !IdenticalUsers)
1948 return nullptr;
1949
1950 if (SeenNonSimplifiedInVal)
1951 return nullptr; // More than one non-simplified value.
1952 SeenNonSimplifiedInVal = true;
1953
1954 // If there is exactly one non-simplified value, we can insert a copy of the
1955 // operation in that block. However, if this is a critical edge, we would
1956 // be inserting the computation on some other paths (e.g. inside a loop).
1957 // Only do this if the pred block is unconditionally branching into the phi
1958 // block. Also, make sure that the pred block is not dead code.
1959 auto *BI = dyn_cast<BranchInst>(InBB->getTerminator());
1960 if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(InBB))
1961 return nullptr;
1962
1963 NewPhiValues.push_back(nullptr);
1964 OpsToMoveUseToIncomingBB.push_back(i);
1965
1966 // If the InVal is an invoke at the end of the pred block, then we can't
1967 // insert a computation after it without breaking the edge.
1968 if (isa<InvokeInst>(InVal))
1969 if (cast<Instruction>(InVal)->getParent() == InBB)
1970 return nullptr;
1971
1972 // Do not push the operation across a loop backedge. This could result in
1973 // an infinite combine loop, and is generally non-profitable (especially
1974 // if the operation was originally outside the loop).
1975 if (isBackEdge(InBB, PN->getParent()))
1976 return nullptr;
1977 }
1978
1979 // Clone the instruction that uses the phi node and move it into the incoming
1980 // BB because we know that the next iteration of InstCombine will simplify it.
1981 SmallDenseMap<BasicBlock *, Instruction *> Clones;
1982 for (auto OpIndex : OpsToMoveUseToIncomingBB) {
1983 Value *Op = PN->getIncomingValue(OpIndex);
1984 BasicBlock *OpBB = PN->getIncomingBlock(OpIndex);
1985
1986 Instruction *Clone = Clones.lookup(OpBB);
1987 if (!Clone) {
1988 Clone = I.clone();
1989 for (Use &U : Clone->operands()) {
1990 if (U == PN)
1991 U = Op;
1992 else
1993 U = U->DoPHITranslation(PN->getParent(), OpBB);
1994 }
1995 Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
1996 Clones.insert({OpBB, Clone});
1997 // We may have speculated the instruction.
1998 Clone->dropUBImplyingAttrsAndMetadata();
1999 }
2000
2001 NewPhiValues[OpIndex] = Clone;
2002 }
2003
2004 // Okay, we can do the transformation: create the new PHI node.
2005 PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
2006 InsertNewInstBefore(NewPN, PN->getIterator());
2007 NewPN->takeName(PN);
2008 NewPN->setDebugLoc(PN->getDebugLoc());
2009
2010 for (unsigned i = 0; i != NumPHIValues; ++i)
2011 NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
2012
2013 if (IdenticalUsers) {
2014 // Collect and deduplicate users up-front to avoid iterator invalidation.
2015 SmallPtrSet<Instruction *, 4> ToReplace;
2016 for (User *U : PN->users()) {
2017 Instruction *User = cast<Instruction>(U);
2018 if (User == &I)
2019 continue;
2020 ToReplace.insert(User);
2021 }
2022 for (Instruction *I : ToReplace) {
2023 replaceInstUsesWith(*I, NewPN);
2024 eraseInstFromFunction(*I);
2025 }
2026 OneUse = true;
2027 }
2028
2029 if (OneUse) {
2030 replaceAllDbgUsesWith(*PN, *NewPN, *PN, DT);
2031 }
2032 return replaceInstUsesWith(I, NewPN);
2033}
2034
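// Combine a binop whose operands are two parallel simple recurrences with
// constant starts and steps into a single recurrence, e.g. (illustrative):
//   %p0 = phi i32 [ 1, %ph ], [ %a0, %loop ]
//   %p1 = phi i32 [ 2, %ph ], [ %a1, %loop ]
//   %a0 = add i32 %p0, 3
//   %a1 = add i32 %p1, 4
//   %r  = add i32 %a0, %a1
// becomes
//   %p = phi i32 [ 3, %ph ], [ %r, %loop ]
//   %r = add i32 %p, 7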
2035 Instruction *InstCombinerImpl::foldBinopWithRecurrence(BinaryOperator &BO) {
2036 if (!BO.isAssociative())
2037 return nullptr;
2038
2039 // Find the interleaved binary ops.
2040 auto Opc = BO.getOpcode();
2041 auto *BO0 = dyn_cast<BinaryOperator>(BO.getOperand(0));
2042 auto *BO1 = dyn_cast<BinaryOperator>(BO.getOperand(1));
2043 if (!BO0 || !BO1 || !BO0->hasNUses(2) || !BO1->hasNUses(2) ||
2044 BO0->getOpcode() != Opc || BO1->getOpcode() != Opc ||
2045 !BO0->isAssociative() || !BO1->isAssociative() ||
2046 BO0->getParent() != BO1->getParent())
2047 return nullptr;
2048
2049 assert(BO.isCommutative() && BO0->isCommutative() && BO1->isCommutative() &&
2050 "Expected commutative instructions!");
2051
2052 // Find the matching phis, forming the recurrences.
2053 PHINode *PN0, *PN1;
2054 Value *Start0, *Step0, *Start1, *Step1;
2055 if (!matchSimpleRecurrence(BO0, PN0, Start0, Step0) || !PN0->hasOneUse() ||
2056 !matchSimpleRecurrence(BO1, PN1, Start1, Step1) || !PN1->hasOneUse() ||
2057 PN0->getParent() != PN1->getParent())
2058 return nullptr;
2059
2060 assert(PN0->getNumIncomingValues() == 2 && PN1->getNumIncomingValues() == 2 &&
2061 "Expected PHIs with two incoming values!");
2062
2063 // Convert the start and step values to constants.
2064 auto *Init0 = dyn_cast<Constant>(Start0);
2065 auto *Init1 = dyn_cast<Constant>(Start1);
2066 auto *C0 = dyn_cast<Constant>(Step0);
2067 auto *C1 = dyn_cast<Constant>(Step1);
2068 if (!Init0 || !Init1 || !C0 || !C1)
2069 return nullptr;
2070
2071 // Fold the recurrence constants.
2072 auto *Init = ConstantFoldBinaryInstruction(Opc, Init0, Init1);
2073 auto *C = ConstantFoldBinaryInstruction(Opc, C0, C1);
2074 if (!Init || !C)
2075 return nullptr;
2076
2077 // Create the reduced PHI.
2078 auto *NewPN = PHINode::Create(PN0->getType(), PN0->getNumIncomingValues(),
2079 "reduced.phi");
2080
2081 // Create the new binary op.
2082 auto *NewBO = BinaryOperator::Create(Opc, NewPN, C);
2083 if (Opc == Instruction::FAdd || Opc == Instruction::FMul) {
2084 // Intersect FMF flags for FADD and FMUL.
2085 FastMathFlags Intersect = BO0->getFastMathFlags() &
2086 BO1->getFastMathFlags() & BO.getFastMathFlags();
2087 NewBO->setFastMathFlags(Intersect);
2088 } else {
2089 OverflowTracking Flags;
2090 Flags.AllKnownNonNegative = false;
2091 Flags.AllKnownNonZero = false;
2092 Flags.mergeFlags(*BO0);
2093 Flags.mergeFlags(*BO1);
2094 Flags.mergeFlags(BO);
2095 Flags.applyFlags(*NewBO);
2096 }
2097 NewBO->takeName(&BO);
2098
2099 for (unsigned I = 0, E = PN0->getNumIncomingValues(); I != E; ++I) {
2100 auto *V = PN0->getIncomingValue(I);
2101 auto *BB = PN0->getIncomingBlock(I);
2102 if (V == Init0) {
2103 assert(((PN1->getIncomingValue(0) == Init1 &&
2104 PN1->getIncomingBlock(0) == BB) ||
2105 (PN1->getIncomingValue(1) == Init1 &&
2106 PN1->getIncomingBlock(1) == BB)) &&
2107 "Invalid incoming block!");
2108 NewPN->addIncoming(Init, BB);
2109 } else if (V == BO0) {
2110 assert(((PN1->getIncomingValue(0) == BO1 &&
2111 PN1->getIncomingBlock(0) == BB) ||
2112 (PN1->getIncomingValue(1) == BO1 &&
2113 PN1->getIncomingBlock(1) == BB)) &&
2114 "Invalid incoming block!");
2115 NewPN->addIncoming(NewBO, BB);
2116 } else
2117 llvm_unreachable("Unexpected incoming value!");
2118 }
2119
2120 LLVM_DEBUG(dbgs() << " Combined " << *PN0 << "\n " << *BO0
2121 << "\n with " << *PN1 << "\n " << *BO1
2122 << '\n');
2123
2124 // Insert the new recurrence and remove the old (dead) ones.
2125 InsertNewInstWith(NewPN, PN0->getIterator());
2126 InsertNewInstWith(NewBO, BO0->getIterator());
2127
2128 eraseInstFromFunction(
2129 *replaceInstUsesWith(*BO0, PoisonValue::get(BO0->getType())));
2130 eraseInstFromFunction(
2131 *replaceInstUsesWith(*BO1, PoisonValue::get(BO1->getType())));
2132 eraseInstFromFunction(*PN0);
2133 eraseInstFromFunction(*PN1);
2134
2135 return replaceInstUsesWith(BO, NewBO);
2136}
2137
2138 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
2139 // Attempt to fold binary operators whose operands are simple recurrences.
2140 if (auto *NewBO = foldBinopWithRecurrence(BO))
2141 return NewBO;
2142
2143 // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
2144 // we are guarding against replicating the binop in >1 predecessor.
2145 // This could miss matching a phi with 2 constant incoming values.
2146 auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
2147 auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
2148 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2149 Phi0->getNumOperands() != Phi1->getNumOperands())
2150 return nullptr;
2151
2152 // TODO: Remove the restriction for binop being in the same block as the phis.
2153 if (BO.getParent() != Phi0->getParent() ||
2154 BO.getParent() != Phi1->getParent())
2155 return nullptr;
2156
2157 // Fold if there is at least one specific constant value in phi0 or phi1's
2158 // incoming values that comes from the same block and this specific constant
2159 // value can be used to do optimization for specific binary operator.
2160 // For example:
2161 // %phi0 = phi i32 [0, %bb0], [%i, %bb1]
2162 // %phi1 = phi i32 [%j, %bb0], [0, %bb1]
2163 // %add = add i32 %phi0, %phi1
2164 // ==>
2165 // %add = phi i32 [%j, %bb0], [%i, %bb1]
2166 Constant *C = ConstantExpr::getBinOpIdentity(BO.getOpcode(), BO.getType(),
2167 /*AllowRHSConstant*/ false);
2168 if (C) {
2169 SmallVector<Value *, 4> NewIncomingValues;
2170 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &> T) {
2171 auto &Phi0Use = std::get<0>(T);
2172 auto &Phi1Use = std::get<1>(T);
2173 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2174 return false;
2175 Value *Phi0UseV = Phi0Use.get();
2176 Value *Phi1UseV = Phi1Use.get();
2177 if (Phi0UseV == C)
2178 NewIncomingValues.push_back(Phi1UseV);
2179 else if (Phi1UseV == C)
2180 NewIncomingValues.push_back(Phi0UseV);
2181 else
2182 return false;
2183 return true;
2184 };
2185
2186 if (all_of(zip(Phi0->operands(), Phi1->operands()),
2187 CanFoldIncomingValuePair)) {
2188 PHINode *NewPhi =
2189 PHINode::Create(Phi0->getType(), Phi0->getNumOperands());
2190 assert(NewIncomingValues.size() == Phi0->getNumOperands() &&
2191 "The number of collected incoming values should equal the number "
2192 "of the original PHINode operands!");
2193 for (unsigned I = 0; I < Phi0->getNumOperands(); I++)
2194 NewPhi->addIncoming(NewIncomingValues[I], Phi0->getIncomingBlock(I));
2195 return NewPhi;
2196 }
2197 }
2198
2199 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2200 return nullptr;
2201
2202 // Match a pair of incoming constants for one of the predecessor blocks.
2203 BasicBlock *ConstBB, *OtherBB;
2204 Constant *C0, *C1;
2205 if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
2206 ConstBB = Phi0->getIncomingBlock(0);
2207 OtherBB = Phi0->getIncomingBlock(1);
2208 } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
2209 ConstBB = Phi0->getIncomingBlock(1);
2210 OtherBB = Phi0->getIncomingBlock(0);
2211 } else {
2212 return nullptr;
2213 }
2214 if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
2215 return nullptr;
2216
2217 // The block that we are hoisting to must reach here unconditionally.
2218 // Otherwise, we could be speculatively executing an expensive or
2219 // non-speculative op.
2220 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
2221 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2222 !DT.isReachableFromEntry(OtherBB))
2223 return nullptr;
2224
2225 // TODO: This check could be tightened to only apply to binops (div/rem) that
2226 // are not safe to speculatively execute. But that could allow hoisting
2227 // potentially expensive instructions (fdiv for example).
2228 for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
2229 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
2230 return nullptr;
2231
2232 // Fold constants for the predecessor block with constant incoming values.
2233 Constant *NewC = ConstantFoldBinaryOpOperands(BO.getOpcode(), C0, C1, DL);
2234 if (!NewC)
2235 return nullptr;
2236
2237 // Make a new binop in the predecessor block with the non-constant incoming
2238 // values.
2239 Builder.SetInsertPoint(PredBlockBranch);
2240 Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
2241 Phi0->getIncomingValueForBlock(OtherBB),
2242 Phi1->getIncomingValueForBlock(OtherBB));
2243 if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2244 NotFoldedNewBO->copyIRFlags(&BO);
2245
2246 // Replace the binop with a phi of the new values. The old phis are dead.
2247 PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
2248 NewPhi->addIncoming(NewBO, OtherBB);
2249 NewPhi->addIncoming(NewC, ConstBB);
2250 return NewPhi;
2251}
2252
2253 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
2254 if (!isa<Constant>(I.getOperand(1)))
2255 return nullptr;
2256
2257 if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
2258 if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
2259 return NewSel;
2260 } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
2261 if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
2262 return NewPhi;
2263 }
2264 return nullptr;
2265}
2266
2267 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
2268 // If this GEP has only 0 indices, it is the same pointer as
2269 // Src. If Src is not a trivial GEP too, don't combine
2270 // the indices.
2271 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2272 !Src.hasOneUse())
2273 return false;
2274 return true;
2275}
2276
2277/// Find a constant NewC that has property:
2278/// shuffle(NewC, ShMask) = C
2279/// Returns nullptr if such a constant does not exist e.g. ShMask=<0,0> C=<1,2>
2280///
2281/// A 1-to-1 mapping is not required. Example:
2282/// ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <poison,5,6,poison>
2283 static Constant *unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
2284 VectorType *NewCTy) {
2285 if (isa<ScalableVectorType>(NewCTy)) {
2286 Constant *Splat = C->getSplatValue();
2287 if (!Splat)
2288 return nullptr;
2289 return ConstantVector::getSplat(NewCTy->getElementCount(), Splat);
2290 }
2291
2292 if (cast<FixedVectorType>(NewCTy)->getNumElements() >
2293 cast<FixedVectorType>(C->getType())->getNumElements())
2294 return nullptr;
2295
2296 unsigned NewCNumElts = cast<FixedVectorType>(NewCTy)->getNumElements();
2297 PoisonValue *PoisonScalar = PoisonValue::get(C->getType()->getScalarType());
2298 SmallVector<Constant *, 16> NewVecC(NewCNumElts, PoisonScalar);
2299 unsigned NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
2300 for (unsigned I = 0; I < NumElts; ++I) {
2301 Constant *CElt = C->getAggregateElement(I);
2302 if (ShMask[I] >= 0) {
2303 assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
2304 Constant *NewCElt = NewVecC[ShMask[I]];
2305 // Bail out if:
2306 // 1. The constant vector contains a constant expression.
2307 // 2. The shuffle needs an element of the constant vector that can't
2308 // be mapped to a new constant vector.
2309 // 3. This is a widening shuffle that copies elements of V1 into the
2310 // extended elements (extending with poison is allowed).
2311 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2312 I >= NewCNumElts)
2313 return nullptr;
2314 NewVecC[ShMask[I]] = CElt;
2315 }
2316 }
2317 return ConstantVector::get(NewVecC);
2318}
2319
2320 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
2321 if (!isa<VectorType>(Inst.getType()))
2322 return nullptr;
2323
2324 BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
2325 Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
2326 assert(cast<VectorType>(LHS->getType())->getElementCount() ==
2327 cast<VectorType>(Inst.getType())->getElementCount());
2328 assert(cast<VectorType>(RHS->getType())->getElementCount() ==
2329 cast<VectorType>(Inst.getType())->getElementCount());
2330
2331 // If both operands of the binop are vector concatenations, then perform the
2332 // narrow binop on each pair of the source operands followed by concatenation
2333 // of the results.
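// E.g. (illustrative, two <2 x i32> halves concatenated into <4 x i32>):
//   add (shufflevector %a0, %a1, <0,1,2,3>), (shufflevector %b0, %b1, <0,1,2,3>)
//   --> shufflevector (add %a0, %b0), (add %a1, %b1), <0,1,2,3>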
2334 Value *L0, *L1, *R0, *R1;
2335 ArrayRef<int> Mask;
2336 if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
2337 match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
2338 LHS->hasOneUse() && RHS->hasOneUse() &&
2339 cast<ShuffleVectorInst>(LHS)->isConcat() &&
2340 cast<ShuffleVectorInst>(RHS)->isConcat()) {
2341 // This transform does not have the speculative execution constraint as
2342 // below because the shuffle is a concatenation. The new binops are
2343 // operating on exactly the same elements as the existing binop.
2344 // TODO: We could ease the mask requirement to allow different undef lanes,
2345 // but that requires an analysis of the binop-with-undef output value.
2346 Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
2347 if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2348 BO->copyIRFlags(&Inst);
2349 Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
2350 if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2351 BO->copyIRFlags(&Inst);
2352 return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
2353 }
2354
2355 auto createBinOpReverse = [&](Value *X, Value *Y) {
2356 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2357 if (auto *BO = dyn_cast<BinaryOperator>(V))
2358 BO->copyIRFlags(&Inst);
2359 Module *M = Inst.getModule();
2360 Function *F = Intrinsic::getOrInsertDeclaration(
2361 M, Intrinsic::vector_reverse, V->getType());
2362 return CallInst::Create(F, V);
2363 };
2364
2365 // NOTE: Reverse shuffles don't require the speculative execution protection
2366 // below because they don't affect which lanes take part in the computation.
2367
2368 Value *V1, *V2;
2369 if (match(LHS, m_VecReverse(m_Value(V1)))) {
2370 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2371 if (match(RHS, m_VecReverse(m_Value(V2))) &&
2372 (LHS->hasOneUse() || RHS->hasOneUse() ||
2373 (LHS == RHS && LHS->hasNUses(2))))
2374 return createBinOpReverse(V1, V2);
2375
2376 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2377 if (LHS->hasOneUse() && isSplatValue(RHS))
2378 return createBinOpReverse(V1, RHS);
2379 }
2380 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2381 else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
2382 return createBinOpReverse(LHS, V2);
2383
2384 auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
2385 Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
2386 if (auto *BO = dyn_cast<BinaryOperator>(V))
2387 BO->copyIRFlags(&Inst);
2388
2389 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
2390 Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
2391 Module *M = Inst.getModule();
2392 Function *F = Intrinsic::getOrInsertDeclaration(
2393 M, Intrinsic::experimental_vp_reverse, V->getType());
2394 return CallInst::Create(F, {V, AllTrueMask, EVL});
2395 };
2396
2397 Value *EVL;
2398 if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2399 m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
2400 // Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
2401 if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2402 m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
2403 (LHS->hasOneUse() || RHS->hasOneUse() ||
2404 (LHS == RHS && LHS->hasNUses(2))))
2405 return createBinOpVPReverse(V1, V2, EVL);
2406
2407 // Op(rev(V1), RHSSplat)) -> rev(Op(V1, RHSSplat))
2408 if (LHS->hasOneUse() && isSplatValue(RHS))
2409 return createBinOpVPReverse(V1, RHS, EVL);
2410 }
2411 // Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
2412 else if (isSplatValue(LHS) &&
2413 match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
2414 m_Value(V2), m_AllOnes(), m_Value(EVL))))
2415 return createBinOpVPReverse(LHS, V2, EVL);
2416
2417 // It may not be safe to reorder shuffles and things like div, urem, etc.
2418 // because we may trap when executing those ops on unknown vector elements.
2419 // See PR20059.
2420 if (!isSafeToSpeculativelyExecute(&Inst))
2421 return nullptr;
2422
2423 auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
2424 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
2425 if (auto *BO = dyn_cast<BinaryOperator>(XY))
2426 BO->copyIRFlags(&Inst);
2427 return new ShuffleVectorInst(XY, M);
2428 };
2429
2430 // If both arguments of the binary operation are shuffles that use the same
2431 // mask and shuffle within a single vector, move the shuffle after the binop.
2432 if (match(LHS, m_Shuffle(m_Value(V1), m_Poison(), m_Mask(Mask))) &&
2433 match(RHS, m_Shuffle(m_Value(V2), m_Poison(), m_SpecificMask(Mask))) &&
2434 V1->getType() == V2->getType() &&
2435 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2436 // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
2437 return createBinOpShuffle(V1, V2, Mask);
2438 }
2439
2440 // If both arguments of a commutative binop are select-shuffles that use the
2441 // same mask with commuted operands, the shuffles are unnecessary.
2442 if (Inst.isCommutative() &&
2443 match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
2444 match(RHS,
2445 m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
2446 auto *LShuf = cast<ShuffleVectorInst>(LHS);
2447 auto *RShuf = cast<ShuffleVectorInst>(RHS);
2448 // TODO: Allow shuffles that contain undefs in the mask?
2449 // That is legal, but it reduces undef knowledge.
2450 // TODO: Allow arbitrary shuffles by shuffling after binop?
2451 // That might be legal, but we have to deal with poison.
2452 if (LShuf->isSelect() &&
2453 !is_contained(LShuf->getShuffleMask(), PoisonMaskElem) &&
2454 RShuf->isSelect() &&
2455 !is_contained(RShuf->getShuffleMask(), PoisonMaskElem)) {
2456 // Example:
2457 // LHS = shuffle V1, V2, <0, 5, 6, 3>
2458 // RHS = shuffle V2, V1, <0, 5, 6, 3>
2459 // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
2460 Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
2461 NewBO->copyIRFlags(&Inst);
2462 return NewBO;
2463 }
2464 }
2465
2466 // If one argument is a shuffle within one vector and the other is a constant,
2467 // try moving the shuffle after the binary operation. This canonicalization
2468 // intends to move shuffles closer to other shuffles and binops closer to
2469 // other binops, so they can be folded. It may also enable demanded elements
2470 // transforms.
2471 Constant *C;
2472 if (match(&Inst, m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Poison(),
2473 m_Mask(Mask))),
2474 m_ImmConstant(C)))) {
2475 assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
2476 "Shuffle should not change scalar type");
2477
2478 bool ConstOp1 = isa<Constant>(RHS);
2479 if (Constant *NewC =
2480 unshuffleConstant(Mask, C, cast<VectorType>(V1->getType()))) {
2481 // For fixed vectors, lanes of NewC not used by the shuffle will be poison
2482 // which will cause UB for div/rem. Mask them with a safe constant.
2483 if (isa<FixedVectorType>(V1->getType()) && Inst.isIntDivRem())
2484 NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
2485
2486 // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
2487 // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
2488 Value *NewLHS = ConstOp1 ? V1 : NewC;
2489 Value *NewRHS = ConstOp1 ? NewC : V1;
2490 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2491 }
2492 }
2493
2494 // Try to reassociate to sink a splat shuffle after a binary operation.
2495 if (Inst.isAssociative() && Inst.isCommutative()) {
2496 // Canonicalize shuffle operand as LHS.
2497 if (isa<ShuffleVectorInst>(RHS))
2498 std::swap(LHS, RHS);
2499
2500 Value *X;
2501 ArrayRef<int> MaskC;
2502 int SplatIndex;
2503 Value *Y, *OtherOp;
2504 if (!match(LHS,
2505 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
2506 !match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
2507 X->getType() != Inst.getType() ||
2508 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
2509 return nullptr;
2510
2511 // FIXME: This may not be safe if the analysis allows undef elements. By
2512 // moving 'Y' before the splat shuffle, we are implicitly assuming
2513 // that it is not undef/poison at the splat index.
2514 if (isSplatValue(OtherOp, SplatIndex)) {
2515 std::swap(Y, OtherOp);
2516 } else if (!isSplatValue(Y, SplatIndex)) {
2517 return nullptr;
2518 }
2519
2520 // X and Y are splatted values, so perform the binary operation on those
2521 // values followed by a splat followed by the 2nd binary operation:
2522 // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
2523 Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
2524 SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
2525 Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
2526 Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
2527
2528 // Intersect FMF on both new binops. Other (poison-generating) flags are
2529 // dropped to be safe.
2530 if (isa<FPMathOperator>(R)) {
2531 R->copyFastMathFlags(&Inst);
2532 R->andIRFlags(RHS);
2533 }
2534 if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2535 NewInstBO->copyIRFlags(R);
2536 return R;
2537 }
2538
2539 return nullptr;
2540}
2541
2542 /// Try to narrow the width of a binop if at least 1 operand is an extend of
2543 /// a value. This requires a potentially expensive known bits check to make
2544/// sure the narrow op does not overflow.
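/// For example (illustrative), when the narrow add is proven not to overflow:
///   add (zext i8 %x to i32), (zext i8 %y to i32)
///   --> zext (add nuw i8 %x, %y) to i32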
2545Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
2546 // We need at least one extended operand.
2547 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
2548
2549 // If this is a sub, we swap the operands since we always want an extension
2550 // on the RHS. The LHS can be an extension or a constant.
2551 if (BO.getOpcode() == Instruction::Sub)
2552 std::swap(Op0, Op1);
2553
2554 Value *X;
2555 bool IsSext = match(Op0, m_SExt(m_Value(X)));
2556 if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
2557 return nullptr;
2558
2559 // If both operands are the same extension from the same source type and we
2560 // can eliminate at least one (hasOneUse), this might work.
2561 CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
2562 Value *Y;
2563 if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
2564 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2565 (Op0->hasOneUse() || Op1->hasOneUse()))) {
2566 // If that did not match, see if we have a suitable constant operand.
2567 // Truncating and extending must produce the same constant.
2568 Constant *WideC;
2569 if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
2570 return nullptr;
2571 Constant *NarrowC = getLosslessInvCast(WideC, X->getType(), CastOpc, DL);
2572 if (!NarrowC)
2573 return nullptr;
2574 Y = NarrowC;
2575 }
2576
2577 // Swap back now that we found our operands.
2578 if (BO.getOpcode() == Instruction::Sub)
2579 std::swap(X, Y);
2580
2581 // Both operands have narrow versions. Last step: the math must not overflow
2582 // in the narrow width.
2583 if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
2584 return nullptr;
2585
2586 // bo (ext X), (ext Y) --> ext (bo X, Y)
2587 // bo (ext X), C --> ext (bo X, C')
2588 Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
2589 if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2590 if (IsSext)
2591 NewBinOp->setHasNoSignedWrap();
2592 else
2593 NewBinOp->setHasNoUnsignedWrap();
2594 }
2595 return CastInst::Create(CastOpc, NarrowBO, BO.getType());
2596}
2597
2598/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
2599/// transform.
2600 static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
2601 GEPOperator &GEP2) {
2602 return GEP1.getNoWrapFlags().intersectForOffsetAdd(GEP2.getNoWrapFlags());
2603 }
2604
2605/// Thread a GEP operation with constant indices through the constant true/false
2606/// arms of a select.
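/// For example (illustrative):
///   gep i32, (select %c, ptr @a, ptr @b), i64 1
///   --> select %c, (gep i32, ptr @a, i64 1), (gep i32, ptr @b, i64 1)
/// where both new GEPs fold to constants.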
2607 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
2608 InstCombiner::BuilderTy &Builder) {
2609 if (!GEP.hasAllConstantIndices())
2610 return nullptr;
2611
2612 Instruction *Sel;
2613 Value *Cond;
2614 Constant *TrueC, *FalseC;
2615 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
2616 !match(Sel,
2617 m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
2618 return nullptr;
2619
2620 // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
2621 // Propagate 'inbounds' and metadata from existing instructions.
2622 // Note: using IRBuilder to create the constants for efficiency.
2623 SmallVector<Value *, 4> IndexC(GEP.indices());
2624 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2625 Type *Ty = GEP.getSourceElementType();
2626 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC, "", NW);
2627 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC, "", NW);
2628 return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
2629}
2630
2631// Canonicalization:
2632// gep T, (gep i8, base, C1), (Index + C2) into
2633// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
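// E.g. (illustrative, i32 elements so sizeof(T) == 4):
//   gep i32, (gep i8, %base, 8), (add %idx, 3)
//   --> gep i32, (gep i8, %base, 20), %idx   ; 20 == 8 + 3 * 4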
2634 static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
2635 GEPOperator *Src,
2636 InstCombinerImpl &IC) {
2637 if (GEP.getNumIndices() != 1)
2638 return nullptr;
2639 auto &DL = IC.getDataLayout();
2640 Value *Base;
2641 const APInt *C1;
2642 if (!match(Src, m_PtrAdd(m_Value(Base), m_APInt(C1))))
2643 return nullptr;
2644 Value *VarIndex;
2645 const APInt *C2;
2646 Type *PtrTy = Src->getType()->getScalarType();
2647 unsigned IndexSizeInBits = DL.getIndexTypeSizeInBits(PtrTy);
2648 if (!match(GEP.getOperand(1), m_AddLike(m_Value(VarIndex), m_APInt(C2))))
2649 return nullptr;
2650 if (C1->getBitWidth() != IndexSizeInBits ||
2651 C2->getBitWidth() != IndexSizeInBits)
2652 return nullptr;
2653 Type *BaseType = GEP.getSourceElementType();
2654 if (BaseType->isScalableTy())
2655 return nullptr;
2656 APInt TypeSize(IndexSizeInBits, DL.getTypeAllocSize(BaseType));
2657 APInt NewOffset = TypeSize * *C2 + *C1;
2658 if (NewOffset.isZero() ||
2659 (Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
2660 GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
2661 if (GEP.hasNoUnsignedWrap() &&
2662 cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
2663 match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
2664 Flags |= GEPNoWrapFlags::noUnsignedWrap();
2665 if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
2666 Flags |= GEPNoWrapFlags::inBounds();
2667 }
2668
2669 Value *GEPConst =
2670 IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
2671 return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
2672 }
2673
2674 return nullptr;
2675}
2676
2677/// Combine constant offsets separated by variable offsets.
2678/// ptradd (ptradd (ptradd p, C1), x), C2 -> ptradd (ptradd p, x), C1+C2
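/// For example (illustrative):
///   ptradd (ptradd (ptradd %p, 4), %x), 8 --> ptradd (ptradd %p, %x), 12
/// so both constant offsets fold together even though %x sits between them.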
2679 static Instruction *combineConstantOffsets(GetElementPtrInst &GEP,
2680 InstCombinerImpl &IC) {
2681 if (!GEP.hasAllConstantIndices())
2682 return nullptr;
2683
2684 GEPNoWrapFlags NW = GEP.getNoWrapFlags();
2685 SmallVector<GetElementPtrInst *> Skipped;
2686 auto *InnerGEP = dyn_cast<GetElementPtrInst>(GEP.getPointerOperand());
2687 while (true) {
2688 if (!InnerGEP)
2689 return nullptr;
2690
2691 NW = NW.intersectForReassociate(InnerGEP->getNoWrapFlags());
2692 if (InnerGEP->hasAllConstantIndices())
2693 break;
2694
2695 if (!InnerGEP->hasOneUse())
2696 return nullptr;
2697
2698 Skipped.push_back(InnerGEP);
2699 InnerGEP = dyn_cast<GetElementPtrInst>(InnerGEP->getPointerOperand());
2700 }
2701
2702 // The two constant offset GEPs are directly adjacent: Let normal offset
2703 // merging handle it.
2704 if (Skipped.empty())
2705 return nullptr;
2706
2707 // FIXME: This one-use check is not strictly necessary. Consider relaxing it
2708 // if profitable.
2709 if (!InnerGEP->hasOneUse())
2710 return nullptr;
2711
2712 // Don't bother with vector splats.
2713 Type *Ty = GEP.getType();
2714 if (InnerGEP->getType() != Ty)
2715 return nullptr;
2716
2717 const DataLayout &DL = IC.getDataLayout();
2718 APInt Offset(DL.getIndexTypeSizeInBits(Ty), 0);
2719 if (!GEP.accumulateConstantOffset(DL, Offset) ||
2720 !InnerGEP->accumulateConstantOffset(DL, Offset))
2721 return nullptr;
2722
2723 IC.replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2724 for (GetElementPtrInst *SkippedGEP : Skipped)
2725 SkippedGEP->setNoWrapFlags(NW);
2726
2727 return IC.replaceInstUsesWith(
2728 GEP,
2729 IC.Builder.CreatePtrAdd(Skipped.front(), IC.Builder.getInt(Offset), "",
2730 NW.intersectForOffsetAdd(GEP.getNoWrapFlags())));
2731}
2732
2733 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
2734 GEPOperator *Src) {
2735 // Combine Indices - If the source pointer to this getelementptr instruction
2736 // is a getelementptr instruction with matching element type, combine the
2737 // indices of the two getelementptr instructions into a single instruction.
2738 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
2739 return nullptr;
2740
2741 if (auto *I = canonicalizeGEPOfConstGEPI8(GEP, Src, *this))
2742 return I;
2743
2744 if (auto *I = combineConstantOffsets(GEP, *this))
2745 return I;
2746
2747 // For constant GEPs, use a more general offset-based folding approach.
2748 Type *PtrTy = Src->getType()->getScalarType();
2749 if (GEP.hasAllConstantIndices() &&
2750 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2751 // Split Src into a variable part and a constant suffix.
2752 gep_type_iterator GTI = gep_type_begin(*Src);
2753 Type *BaseType = GTI.getIndexedType();
2754 bool IsFirstType = true;
2755 unsigned NumVarIndices = 0;
2756 for (auto Pair : enumerate(Src->indices())) {
2757 if (!isa<ConstantInt>(Pair.value())) {
2758 BaseType = GTI.getIndexedType();
2759 IsFirstType = false;
2760 NumVarIndices = Pair.index() + 1;
2761 }
2762 ++GTI;
2763 }
2764
2765 // Determine the offset for the constant suffix of Src.
2766 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
2767 if (NumVarIndices != Src->getNumIndices()) {
2768 // FIXME: getIndexedOffsetInType() does not handle scalable vectors.
2769 if (BaseType->isScalableTy())
2770 return nullptr;
2771
2772 SmallVector<Value *> ConstantIndices;
2773 if (!IsFirstType)
2774 ConstantIndices.push_back(
2775 Constant::getNullValue(Type::getInt32Ty(GEP.getContext())));
2776 append_range(ConstantIndices, drop_begin(Src->indices(), NumVarIndices));
2777 Offset += DL.getIndexedOffsetInType(BaseType, ConstantIndices);
2778 }
2779
2780 // Add the offset for GEP (which is fully constant).
2781 if (!GEP.accumulateConstantOffset(DL, Offset))
2782 return nullptr;
2783
2784 // Convert the total offset back into indices.
2785 SmallVector<APInt> ConstIndices =
2786 DL.getGEPIndicesForOffset(BaseType, Offset);
2787 if (!Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2788 return nullptr;
2789
2790 GEPNoWrapFlags NW = getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP));
2791 SmallVector<Value *> Indices(
2792 drop_end(Src->indices(), Src->getNumIndices() - NumVarIndices));
2793 for (const APInt &Idx : drop_begin(ConstIndices, !IsFirstType)) {
2794 Indices.push_back(ConstantInt::get(GEP.getContext(), Idx));
2795 // Even if the total offset is inbounds, we may end up representing it
2796 // by first performing a larger negative offset, and then a smaller
2797 // positive one. The large negative offset might go out of bounds. Only
2798 // preserve inbounds if all signs are the same.
2799 if (Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2800 NW = NW.withoutNoUnsignedSignedWrap();
2801 if (!Idx.isNonNegative())
2802 NW = NW.withoutNoUnsignedWrap();
2803 }
2804
2805 return replaceInstUsesWith(
2806 GEP, Builder.CreateGEP(Src->getSourceElementType(), Src->getOperand(0),
2807 Indices, "", NW));
2808 }
2809
2810 if (Src->getResultElementType() != GEP.getSourceElementType())
2811 return nullptr;
2812
2813 SmallVector<Value*, 8> Indices;
2814
2815 // Find out whether the last index in the source GEP is a sequential idx.
2816 bool EndsWithSequential = false;
2817 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2818 I != E; ++I)
2819 EndsWithSequential = I.isSequential();
2820
2821 // Can we combine the two pointer arithmetics offsets?
2822 if (EndsWithSequential) {
2823 // Replace: gep (gep %P, long B), long A, ...
2824 // With: T = long A+B; gep %P, T, ...
2825 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2826 Value *GO1 = GEP.getOperand(1);
2827
2828 // If they aren't the same type, then the input hasn't been processed
2829 // by the loop above yet (which canonicalizes sequential index types to
2830 // intptr_t). Just avoid transforming this until the input has been
2831 // normalized.
2832 if (SO1->getType() != GO1->getType())
2833 return nullptr;
2834
2835 Value *Sum =
2836 simplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2837 // Only do the combine when we are sure the cost after the
2838 // merge is never more than that before the merge.
2839 if (Sum == nullptr)
2840 return nullptr;
2841
2842 Indices.append(Src->op_begin()+1, Src->op_end()-1);
2843 Indices.push_back(Sum);
2844 Indices.append(GEP.op_begin()+2, GEP.op_end());
2845 } else if (isa<Constant>(*GEP.idx_begin()) &&
2846 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2847 Src->getNumOperands() != 1) {
2848 // Otherwise we can do the fold if the first index of the GEP is a zero
2849 Indices.append(Src->op_begin()+1, Src->op_end());
2850 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2851 }
2852
2853 // Don't create GEPs with more than one variable index.
2854 unsigned NumVarIndices =
2855 count_if(Indices, [](Value *Idx) { return !isa<Constant>(Idx); });
2856 if (NumVarIndices > 1)
2857 return nullptr;
2858
2859 if (!Indices.empty())
2860 return replaceInstUsesWith(
2861 GEP, Builder.CreateGEP(
2862 Src->getSourceElementType(), Src->getOperand(0), Indices, "",
2863 getMergedGEPNoWrapFlags(*Src, *cast<GEPOperator>(&GEP))));
2864
2865 return nullptr;
2866}
2867
2868 Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
2869 BuilderTy *Builder,
2870 bool &DoesConsume, unsigned Depth) {
2871 static Value *const NonNull = reinterpret_cast<Value *>(uintptr_t(1));
2872 // ~(~(X)) -> X.
2873 Value *A, *B;
2874 if (match(V, m_Not(m_Value(A)))) {
2875 DoesConsume = true;
2876 return A;
2877 }
2878
2879 Constant *C;
2880 // Constants can be considered to be not'ed values.
2881 if (match(V, m_ImmConstant(C)))
2882 return ConstantExpr::getNot(C);
2883
2884 if (Depth++ >= MaxAnalysisRecursionDepth)
2885 return nullptr;
2886
2887 // The rest of the cases require that we invert all uses so don't bother
2888 // doing the analysis if we know we can't use the result.
2889 if (!WillInvertAllUses)
2890 return nullptr;
2891
2892 // Compares can be inverted if all of their uses are being modified to use
2893 // the ~V.
2894 if (auto *I = dyn_cast<CmpInst>(V)) {
2895 if (Builder != nullptr)
2896 return Builder->CreateCmp(I->getInversePredicate(), I->getOperand(0),
2897 I->getOperand(1));
2898 return NonNull;
2899 }
2900
2901 // If `V` is of the form `A + B` then `-1 - V` can be folded into
2902 // `(-1 - B) - A` if we are willing to invert all of the uses.
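// This is plain two's-complement algebra: ~V = -1 - V, so
// ~(A + B) = -1 - (A + B) = (-1 - B) - A = (~B) - A, which is why a sub is
// built from the freely-inverted operand below.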
2903 if (match(V, m_Add(m_Value(A), m_Value(B)))) {
2904 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2905 DoesConsume, Depth))
2906 return Builder ? Builder->CreateSub(BV, A) : NonNull;
2907 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2908 DoesConsume, Depth))
2909 return Builder ? Builder->CreateSub(AV, B) : NonNull;
2910 return nullptr;
2911 }
2912
2913 // If `V` is of the form `A ^ ~B` then `~(A ^ ~B)` can be folded
2914 // into `A ^ B` if we are willing to invert all of the uses.
2915 if (match(V, m_Xor(m_Value(A), m_Value(B)))) {
2916 if (auto *BV = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2917 DoesConsume, Depth))
2918 return Builder ? Builder->CreateXor(A, BV) : NonNull;
2919 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2920 DoesConsume, Depth))
2921 return Builder ? Builder->CreateXor(AV, B) : NonNull;
2922 return nullptr;
2923 }
2924
2925 // If `V` is of the form `B - A` then `-1 - V` can be folded into
2926 // `A + (-1 - B)` if we are willing to invert all of the uses.
2927 if (match(V, m_Sub(m_Value(A), m_Value(B)))) {
2928 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2929 DoesConsume, Depth))
2930 return Builder ? Builder->CreateAdd(AV, B) : NonNull;
2931 return nullptr;
2932 }
2933
2934 // If `V` is of the form `(~A) s>> B` then `~((~A) s>> B)` can be folded
2935 // into `A s>> B` if we are willing to invert all of the uses.
2936 if (match(V, m_AShr(m_Value(A), m_Value(B)))) {
2937 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2938 DoesConsume, Depth))
2939 return Builder ? Builder->CreateAShr(AV, B) : NonNull;
2940 return nullptr;
2941 }
2942
2943 Value *Cond;
2944 // LogicOps are special in that we canonicalize them at the cost of an
2945 // instruction.
2946 bool IsSelect = match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
2947 !shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(V));
2948 // Selects/min/max with invertible operands are freely invertible
2949 if (IsSelect || match(V, m_MaxOrMin(m_Value(A), m_Value(B)))) {
2950 bool LocalDoesConsume = DoesConsume;
2951 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder*/ nullptr,
2952 LocalDoesConsume, Depth))
2953 return nullptr;
2954 if (Value *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
2955 LocalDoesConsume, Depth)) {
2956 DoesConsume = LocalDoesConsume;
2957 if (Builder != nullptr) {
2958 Value *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
2959 DoesConsume, Depth);
2960 assert(NotB != nullptr &&
2961 "Unable to build inverted value for known freely invertable op");
2962 if (auto *II = dyn_cast<IntrinsicInst>(V))
2963 return Builder->CreateBinaryIntrinsic(
2964 getInverseMinMaxIntrinsic(II->getIntrinsicID()), NotA, NotB);
2965 return Builder->CreateSelect(Cond, NotA, NotB);
2966 }
2967 return NonNull;
2968 }
2969 }
2970
2971 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2972 bool LocalDoesConsume = DoesConsume;
2973 SmallVector<std::pair<Value *, BasicBlock *>, 8> IncomingValues;
2974 for (Use &U : PN->operands()) {
2975 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2976 Value *NewIncomingVal = getFreelyInvertedImpl(
2977 U.get(), /*WillInvertAllUses=*/false,
2978 /*Builder=*/nullptr, LocalDoesConsume, MaxAnalysisRecursionDepth - 1);
2979 if (NewIncomingVal == nullptr)
2980 return nullptr;
2981 // Make sure that we can safely erase the original PHI node.
2982 if (NewIncomingVal == V)
2983 return nullptr;
2984 if (Builder != nullptr)
2985 IncomingValues.emplace_back(NewIncomingVal, IncomingBlock);
2986 }
2987
2988 DoesConsume = LocalDoesConsume;
2989 if (Builder != nullptr) {
2990 BuilderTy::InsertPointGuard Guard(*Builder);
2991 Builder->SetInsertPoint(PN);
2992 PHINode *NewPN =
2993 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
2994 for (auto [Val, Pred] : IncomingValues)
2995 NewPN->addIncoming(Val, Pred);
2996 return NewPN;
2997 }
2998 return NonNull;
2999 }
3000
3001 if (match(V, m_SExtLike(m_Value(A)))) {
3002 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3003 DoesConsume, Depth))
3004 return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
3005 return nullptr;
3006 }
3007
3008 if (match(V, m_Trunc(m_Value(A)))) {
3009 if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3010 DoesConsume, Depth))
3011 return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
3012 return nullptr;
3013 }
3014
3015 // De Morgan's Laws:
3016 // (~(A | B)) -> (~A & ~B)
3017 // (~(A & B)) -> (~A | ~B)
3018 auto TryInvertAndOrUsingDeMorgan = [&](Instruction::BinaryOps Opcode,
3019 bool IsLogical, Value *A,
3020 Value *B) -> Value * {
3021 bool LocalDoesConsume = DoesConsume;
3022 if (!getFreelyInvertedImpl(B, B->hasOneUse(), /*Builder=*/nullptr,
3023 LocalDoesConsume, Depth))
3024 return nullptr;
3025 if (auto *NotA = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
3026 LocalDoesConsume, Depth)) {
3027 auto *NotB = getFreelyInvertedImpl(B, B->hasOneUse(), Builder,
3028 LocalDoesConsume, Depth);
3029 DoesConsume = LocalDoesConsume;
3030 if (IsLogical)
3031 return Builder ? Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3032 return Builder ? Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3033 }
3034
3035 return nullptr;
3036 };
3037
3038 if (match(V, m_Or(m_Value(A), m_Value(B))))
3039 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/false, A,
3040 B);
3041
3042 if (match(V, m_And(m_Value(A), m_Value(B))))
3043 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/false, A,
3044 B);
3045
3046 if (match(V, m_LogicalOr(m_Value(A), m_Value(B))))
3047 return TryInvertAndOrUsingDeMorgan(Instruction::And, /*IsLogical=*/true, A,
3048 B);
3049
3050 if (match(V, m_LogicalAnd(m_Value(A), m_Value(B))))
3051 return TryInvertAndOrUsingDeMorgan(Instruction::Or, /*IsLogical=*/true, A,
3052 B);
3053
3054 return nullptr;
3055}
3056
3057/// Return true if we should canonicalize the gep to an i8 ptradd.
3058 static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
3059 Value *PtrOp = GEP.getOperand(0);
3060 Type *GEPEltType = GEP.getSourceElementType();
3061 if (GEPEltType->isIntegerTy(8))
3062 return false;
3063
3064 // Canonicalize scalable GEPs to an explicit offset using the llvm.vscale
3065 // intrinsic. This has better support in BasicAA.
3066 if (GEPEltType->isScalableTy())
3067 return true;
3068
3069 // gep i32 p, mul(O, C) -> gep i8, p, mul(O, C*4) to fold the two multiplies
3070 // together.
3071 if (GEP.getNumIndices() == 1 &&
3072 match(GEP.getOperand(1),
3073 m_OneUse(m_CombineOr(m_Mul(m_Value(), m_ConstantInt()),
3074 m_Shl(m_Value(), m_ConstantInt())))))
3075 return true;
3076
3077 // gep (gep %p, C1), %x, C2 is expanded so the two constants can
3078 // possibly be merged together.
3079 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
3080 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3081 any_of(GEP.indices(), [](Value *V) {
3082 const APInt *C;
3083 return match(V, m_APInt(C)) && !C->isZero();
3084 });
3085}
3086
3087 static Instruction *foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN,
3088 IRBuilderBase &Builder) {
3089 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
3090 if (!Op1)
3091 return nullptr;
3092
3093 // Don't fold a GEP into itself through a PHI node. This can only happen
3094 // through the back-edge of a loop. Folding a GEP into itself means that
3095 // the value of the previous iteration needs to be stored in the meantime,
3096 // thus requiring an additional register variable to be live, but not
3097 // actually achieving anything (the GEP still needs to be executed once per
3098 // loop iteration).
3099 if (Op1 == &GEP)
3100 return nullptr;
3101 GEPNoWrapFlags NW = Op1->getNoWrapFlags();
3102
3103 int DI = -1;
3104
3105 for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
3106 auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
3107 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3108 Op1->getSourceElementType() != Op2->getSourceElementType())
3109 return nullptr;
3110
3111 // As for Op1 above, don't try to fold a GEP into itself.
3112 if (Op2 == &GEP)
3113 return nullptr;
3114
3115 // Keep track of the type as we walk the GEP.
3116 Type *CurTy = nullptr;
3117
3118 for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
3119 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3120 return nullptr;
3121
3122 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3123 if (DI == -1) {
3124 // We have not seen any differences yet in the GEPs feeding the
3125 // PHI yet, so we record this one if it is allowed to be a
3126 // variable.
3127
3128 // The first two arguments can vary for any GEP, the rest have to be
3129 // static for struct slots
3130 if (J > 1) {
3131 assert(CurTy && "No current type?");
3132 if (CurTy->isStructTy())
3133 return nullptr;
3134 }
3135
3136 DI = J;
3137 } else {
3138 // The GEP is different by more than one input. While this could be
3139 // extended to support GEPs that vary by more than one variable it
3140 // doesn't make sense since it greatly increases the complexity and
3141 // would result in an R+R+R addressing mode which no backend
3142 // directly supports and would need to be broken into several
3143 // simpler instructions anyway.
3144 return nullptr;
3145 }
3146 }
3147
3148 // Sink down a layer of the type for the next iteration.
3149 if (J > 0) {
3150 if (J == 1) {
3151 CurTy = Op1->getSourceElementType();
3152 } else {
3153 CurTy =
3154 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
3155 }
3156 }
3157 }
3158
3159 NW &= Op2->getNoWrapFlags();
3160 }
3161
3162 // If not all GEPs are identical we'll have to create a new PHI node.
3163 // Check that the old PHI node has only one use so that it will get
3164 // removed.
3165 if (DI != -1 && !PN->hasOneUse())
3166 return nullptr;
3167
3168 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
3169 NewGEP->setNoWrapFlags(NW);
3170
3171 if (DI == -1) {
3172 // All the GEPs feeding the PHI are identical. Clone one down into our
3173 // BB so that it can be merged with the current GEP.
3174 } else {
3175 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
3176 // into the current block so it can be merged, and create a new PHI to
3177 // set that index.
3178 PHINode *NewPN;
3179 {
3180 IRBuilderBase::InsertPointGuard Guard(Builder);
3181 Builder.SetInsertPoint(PN);
3182 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3183 PN->getNumOperands());
3184 }
3185
3186 for (auto &I : PN->operands())
3187 NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
3188 PN->getIncomingBlock(I));
3189
3190 NewGEP->setOperand(DI, NewPN);
3191 }
3192
3193 NewGEP->insertBefore(*GEP.getParent(), GEP.getParent()->getFirstInsertionPt());
3194 return NewGEP;
3195}
3196
3197 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3198 Value *PtrOp = GEP.getOperand(0);
3199 SmallVector<Value *, 8> Indices(GEP.indices());
3200 Type *GEPType = GEP.getType();
3201 Type *GEPEltType = GEP.getSourceElementType();
3202 if (Value *V =
3203 simplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.getNoWrapFlags(),
3204 SQ.getWithInstruction(&GEP)))
3205 return replaceInstUsesWith(GEP, V);
3206
3207 // For vector geps, use the generic demanded vector support.
3208 // Skip if GEP return type is scalable. The number of elements is unknown at
3209 // compile-time.
3210 if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
3211 auto VWidth = GEPFVTy->getNumElements();
3212 APInt PoisonElts(VWidth, 0);
3213 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
3214 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
3215 PoisonElts)) {
3216 if (V != &GEP)
3217 return replaceInstUsesWith(GEP, V);
3218 return &GEP;
3219 }
3220 }
3221
3222 // Eliminate unneeded casts for indices, and replace indices which displace
3223 // by multiples of a zero size type with zero.
3224 bool MadeChange = false;
3225
3226 // Index width may not be the same width as pointer width.
3227 // Data layout chooses the right type based on supported integer types.
3228 Type *NewScalarIndexTy =
3229 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
3230
gep_type_iterator GTI = gep_type_begin(GEP);
3232 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
3233 ++I, ++GTI) {
3234 // Skip indices into struct types.
3235 if (GTI.isStruct())
3236 continue;
3237
3238 Type *IndexTy = (*I)->getType();
3239 Type *NewIndexType =
3240 IndexTy->isVectorTy()
3241 ? VectorType::get(NewScalarIndexTy,
3242 cast<VectorType>(IndexTy)->getElementCount())
3243 : NewScalarIndexTy;
3244
3245 // If the element type has zero size then any index over it is equivalent
3246 // to an index of zero, so replace it with zero if it is not zero already.
3247 Type *EltTy = GTI.getIndexedType();
3248 if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
3249 if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
3250 *I = Constant::getNullValue(NewIndexType);
3251 MadeChange = true;
3252 }
3253
3254 if (IndexTy != NewIndexType) {
3255 // If we are using a wider index than needed for this platform, shrink
3256 // it to what we need. If narrower, sign-extend it to what we need.
3257 // This explicit cast can make subsequent optimizations more obvious.
3258 if (IndexTy->getScalarSizeInBits() <
3259 NewIndexType->getScalarSizeInBits()) {
3260 if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
3261 *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
3262 else
3263 *I = Builder.CreateSExt(*I, NewIndexType);
3264 } else {
3265 *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
3266 GEP.hasNoUnsignedSignedWrap());
3267 }
3268 MadeChange = true;
3269 }
3270 }
3271 if (MadeChange)
3272 return &GEP;
3273
3274 // Canonicalize constant GEPs to i8 type.
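// For example (illustrative): on a target where i32 occupies 4 bytes,
//   %g = getelementptr inbounds i32, ptr %p, i64 3
// becomes
//   %g = getelementptr inbounds i8, ptr %p, i64 12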
3275 if (!GEPEltType->isIntegerTy(8) && GEP.hasAllConstantIndices()) {
3276 APInt Offset(DL.getIndexTypeSizeInBits(GEPType), 0);
3277 if (GEP.accumulateConstantOffset(DL, Offset))
3278 return replaceInstUsesWith(
3279 GEP, Builder.CreatePtrAdd(PtrOp, Builder.getInt(Offset), "",
3280 GEP.getNoWrapFlags()));
3281 }
3282
3284 Value *Offset = EmitGEPOffset(cast<GEPOperator>(&GEP));
3285 Value *NewGEP =
3286 Builder.CreatePtrAdd(PtrOp, Offset, "", GEP.getNoWrapFlags());
3287 return replaceInstUsesWith(GEP, NewGEP);
3288 }
3289
3290 // Strip trailing zero indices.
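// For example (illustrative):
//   %g = getelementptr [4 x i32], ptr %p, i64 %i, i64 0
// computes the same address as
//   %g = getelementptr [4 x i32], ptr %p, i64 %i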
3291 auto *LastIdx = dyn_cast<Constant>(Indices.back());
3292 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3293 return replaceInstUsesWith(
3294 GEP, Builder.CreateGEP(GEP.getSourceElementType(), PtrOp,
3295 drop_end(Indices), "", GEP.getNoWrapFlags()));
3296 }
3297
3298 // Strip leading zero indices.
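// For example (illustrative):
//   %g = getelementptr [4 x i32], ptr %p, i64 0, i64 %i
// computes the same address as
//   %g = getelementptr i32, ptr %p, i64 %i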
3299 auto *FirstIdx = dyn_cast<Constant>(Indices.front());
3300 if (FirstIdx && FirstIdx->isNullValue() &&
3301 !FirstIdx->getType()->isVectorTy()) {
3303 ++GTI;
3304 if (!GTI.isStruct())
3305 return replaceInstUsesWith(GEP, Builder.CreateGEP(GTI.getIndexedType(),
3306 GEP.getPointerOperand(),
3307 drop_begin(Indices), "",
3308 GEP.getNoWrapFlags()));
3309 }
3310
3311 // Scalarize vector operands; prefer splat-of-gep as the canonical form.
3312 // Note that this loses information about undef lanes; we run it after
3313 // demanded bits to partially mitigate that loss.
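// For example (illustrative), a GEP whose only vector operand is a splat,
//   %g = getelementptr i32, ptr %p, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
// is rewritten as a scalar GEP of %p by 1 whose result is then splatted
// back into a <4 x ptr> vector.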
3314 if (GEPType->isVectorTy() && llvm::any_of(GEP.operands(), [](Value *Op) {
3315 return Op->getType()->isVectorTy() && getSplatValue(Op);
3316 })) {
3317 SmallVector<Value *> NewOps;
3318 for (auto &Op : GEP.operands()) {
3319 if (Op->getType()->isVectorTy())
3320 if (Value *Scalar = getSplatValue(Op)) {
3321 NewOps.push_back(Scalar);
3322 continue;
3323 }
3324 NewOps.push_back(Op);
3325 }
3326
3327 Value *Res = Builder.CreateGEP(GEP.getSourceElementType(), NewOps[0],
3328 ArrayRef(NewOps).drop_front(), GEP.getName(),
3329 GEP.getNoWrapFlags());
3330 if (!Res->getType()->isVectorTy()) {
3331 ElementCount EC = cast<VectorType>(GEPType)->getElementCount();
3332 Res = Builder.CreateVectorSplat(EC, Res);
3333 }
3334 return replaceInstUsesWith(GEP, Res);
3335 }
3336
3337 bool SeenVarIndex = false;
3338 for (auto [IdxNum, Idx] : enumerate(Indices)) {
3339 if (isa<Constant>(Idx))
3340 continue;
3341
3342 if (!SeenVarIndex) {
3343 SeenVarIndex = true;
3344 continue;
3345 }
3346
3347 // GEP has multiple variable indices: Split it.
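// For example (illustrative):
//   %g = getelementptr [8 x [8 x i32]], ptr %p, i64 %i, i64 %j
// is split into
//   %s = getelementptr [8 x [8 x i32]], ptr %p, i64 %i
//   %g = getelementptr [8 x i32], ptr %s, i64 0, i64 %j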
3348 ArrayRef<Value *> FrontIndices = ArrayRef(Indices).take_front(IdxNum);
3349 Value *FrontGEP =
3350 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3351 GEP.getName() + ".split", GEP.getNoWrapFlags());
3352
3353 SmallVector<Value *> BackIndices;
3354 BackIndices.push_back(Constant::getNullValue(NewScalarIndexTy));
3355 append_range(BackIndices, drop_begin(Indices, IdxNum));
return GetElementPtrInst::Create(
3357 GetElementPtrInst::getIndexedType(GEPEltType, FrontIndices), FrontGEP,
3358 BackIndices, GEP.getNoWrapFlags());
3359 }
3360
3361 // Check to see if the inputs to the PHI node are getelementptr instructions.
3362 if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
3363 if (Value *NewPtrOp = foldGEPOfPhi(GEP, PN, Builder))
3364 return replaceOperand(GEP, 0, NewPtrOp);
3365 }
3366
3367 if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
3368 if (Instruction *I = visitGEPOfGEP(GEP, Src))
3369 return I;
3370
3371 if (GEP.getNumIndices() == 1) {
3372 unsigned AS = GEP.getPointerAddressSpace();
3373 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3374 DL.getIndexSizeInBits(AS)) {
3375 uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue();
3376
3377 if (TyAllocSize == 1) {
3378 // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y),
3379 // but only if the result pointer is only used as if it were an integer,
3380 // or both point to the same underlying object (otherwise provenance is
3381 // not necessarily retained).
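// For example (illustrative):
//   %yi = ptrtoint ptr %y to i64
//   %xi = ptrtoint ptr %x to i64
//   %d  = sub i64 %yi, %xi
//   %g  = getelementptr i8, ptr %x, i64 %d
// Here %g computes the address of %y, so qualifying uses of %g can be
// replaced with %y directly.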
3382 Value *X = GEP.getPointerOperand();
3383 Value *Y;
3384 if (match(GEP.getOperand(1),
m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
3386 GEPType == Y->getType()) {
3387 bool HasSameUnderlyingObject =
getUnderlyingObject(X) == getUnderlyingObject(Y);
3389 bool Changed = false;
3390 GEP.replaceUsesWithIf(Y, [&](Use &U) {
3391 bool ShouldReplace = HasSameUnderlyingObject ||
3392 isa<ICmpInst>(U.getUser()) ||
3393 isa<PtrToIntInst>(U.getUser());
3394 Changed |= ShouldReplace;
3395 return ShouldReplace;
3396 });
3397 return Changed ? &GEP : nullptr;
3398 }
3399 } else if (auto *ExactIns =
3400 dyn_cast<PossiblyExactOperator>(GEP.getOperand(1))) {
3401 // Canonicalize (gep T* X, V / sizeof(T)) to (gep i8* X, V)
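// For example (illustrative), with i32 occupying 4 bytes:
//   %i = sdiv exact i64 %n, 4
//   %g = getelementptr i32, ptr %p, i64 %i
// becomes
//   %g = getelementptr i8, ptr %p, i64 %n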
3402 Value *V;
3403 if (ExactIns->isExact()) {
3404 if ((has_single_bit(TyAllocSize) &&
3405 match(GEP.getOperand(1),
3406 m_Shr(m_Value(V),
3407 m_SpecificInt(countr_zero(TyAllocSize))))) ||
3408 match(GEP.getOperand(1),
3409 m_IDiv(m_Value(V), m_SpecificInt(TyAllocSize)))) {
3410 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3411 GEP.getPointerOperand(), V,
3412 GEP.getNoWrapFlags());
3413 }
3414 }
3415 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3416 // Try to canonicalize a non-i8 element type to i8 if the index is an
3417 // exact instruction (div/shr) with a constant RHS: the element scale
3418 // can then be folded into the div/shr (similar to the mul case, just
3419 // inverted).
3420 const APInt *C;
3421 std::optional<APInt> NewC;
3422 if (has_single_bit(TyAllocSize) &&
3423 match(ExactIns, m_Shr(m_Value(V), m_APInt(C))) &&
3424 C->uge(countr_zero(TyAllocSize)))
3425 NewC = *C - countr_zero(TyAllocSize);
3426 else if (match(ExactIns, m_UDiv(m_Value(V), m_APInt(C)))) {
3427 APInt Quot;
3428 uint64_t Rem;
3429 APInt::udivrem(*C, TyAllocSize, Quot, Rem);
3430 if (Rem == 0)
3431 NewC = Quot;
3432 } else if (match(ExactIns, m_SDiv(m_Value(V), m_APInt(C)))) {
3433 APInt Quot;
3434 int64_t Rem;
3435 APInt::sdivrem(*C, TyAllocSize, Quot, Rem);
3436 // For sdiv we need to make sure we aren't creating INT_MIN / -1.
3437 if (!Quot.isAllOnes() && Rem == 0)
3438 NewC = Quot;
3439 }
3440
3441 if (NewC.has_value()) {
3442 Value *NewOp = Builder.CreateBinOp(
3443 static_cast<Instruction::BinaryOps>(ExactIns->getOpcode()), V,
3444 ConstantInt::get(V->getType(), *NewC));
3445 cast<BinaryOperator>(NewOp)->setIsExact();
3446 return GetElementPtrInst::Create(Builder.getInt8Ty(),
3447 GEP.getPointerOperand(), NewOp,
3448 GEP.getNoWrapFlags());
3449 }
3450 }
3451 }
3452 }
3453 }
3454 // We do not handle pointer-vector geps here.
3455 if (GEPType->isVectorTy())
3456 return nullptr;
3457
3458 if (!GEP.isInBounds()) {
3459 unsigned IdxWidth =
3460 DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
3461 APInt BasePtrOffset(IdxWidth, 0);
3462 Value *UnderlyingPtrOp =
3463 PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL, BasePtrOffset);
3464 bool CanBeNull, CanBeFreed;
3465 uint64_t DerefBytes = UnderlyingPtrOp->getPointerDereferenceableBytes(
3466 DL, CanBeNull, CanBeFreed);
3467 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3468 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
3469 BasePtrOffset.isNonNegative()) {
3470 APInt AllocSize(IdxWidth, DerefBytes);
3471 if (BasePtrOffset.ule(AllocSize)) {
return GetElementPtrInst::CreateInBounds(
3473 GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
3474 }
3475 }
3476 }
3477 }
3478
3479 // nusw + nneg -> nuw
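// For example (illustrative), a 'getelementptr nusw i8, ptr %p, i64 %i'
// where %i is known to be non-negative cannot wrap in the unsigned sense
// either, so the nuw flag can be added as well.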
3480 if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
3481 all_of(GEP.indices(), [&](Value *Idx) {
3482 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3483 })) {
3484 GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
3485 return &GEP;
3486 }
3487
3488 // These rewrites are trying to preserve inbounds/nuw attributes. So we want
3489 // to do this after having tried to derive "nuw" above.
3490 if (GEP.getNumIndices() == 1) {
3491 // Given (gep p, x+y) we want to determine the common nowrap flags for both
3492 // geps if transforming into (gep (gep p, x), y).
3493 auto GetPreservedNoWrapFlags = [&](bool AddIsNUW) {
3494 // We can preserve both "inbounds nuw", "nusw nuw" and "nuw" if we know
3495 // that x + y does not have unsigned wrap.
3496 if (GEP.hasNoUnsignedWrap() && AddIsNUW)
3497 return GEP.getNoWrapFlags();
3498 return GEPNoWrapFlags::none();
3499 };
3500
3501 // Try to replace ADD + GEP with GEP + GEP.
3502 Value *Idx1, *Idx2;
3503 if (match(GEP.getOperand(1),
3504 m_OneUse(m_AddLike(m_Value(Idx1), m_Value(Idx2))))) {
3505 // %idx = add i64 %idx1, %idx2
3506 // %gep = getelementptr i32, ptr %ptr, i64 %idx
3507 // as:
3508 // %newptr = getelementptr i32, ptr %ptr, i64 %idx1
3509 // %newgep = getelementptr i32, ptr %newptr, i64 %idx2
3510 bool NUW = match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()));
3511 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3512 auto *NewPtr =
3513 Builder.CreateGEP(GEP.getSourceElementType(), GEP.getPointerOperand(),
3514 Idx1, "", NWFlags);
3515 return replaceInstUsesWith(GEP,
3516 Builder.CreateGEP(GEP.getSourceElementType(),
3517 NewPtr, Idx2, "", NWFlags));
3518 }
3519 ConstantInt *C;
3520 if (match(GEP.getOperand(1), m_OneUse(m_SExtLike(m_OneUse(m_NSWAddLike(
3521 m_Value(Idx1), m_ConstantInt(C))))))) {
3522 // %add = add nsw i32 %idx1, idx2
3523 // %sidx = sext i32 %add to i64
3524 // %gep = getelementptr i32, ptr %ptr, i64 %sidx
3525 // as:
3526 // %newptr = getelementptr i32, ptr %ptr, i32 %idx1
3527 // %newgep = getelementptr i32, ptr %newptr, i32 idx2
3528 bool NUW = match(GEP.getOperand(1),
3530 GEPNoWrapFlags NWFlags = GetPreservedNoWrapFlags(NUW);
3531 auto *NewPtr = Builder.CreateGEP(
3532 GEP.getSourceElementType(), GEP.getPointerOperand(),
3533 Builder.CreateSExt(Idx1, GEP.getOperand(1)->getType()), "", NWFlags);
3534 return replaceInstUsesWith(
3535 GEP,
3536 Builder.CreateGEP(GEP.getSourceElementType(), NewPtr,
3537 Builder.CreateSExt(C, GEP.getOperand(1)->getType()),
3538 "", NWFlags));
3539 }
3540 }
3541
if (Instruction *R = foldSelectGEP(GEP, Builder))
3543 return R;
3544
3545 return nullptr;
3546}
3547
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
3549 Instruction *AI) {
if (isa<ConstantPointerNull>(V))
3551 return true;
3552 if (auto *LI = dyn_cast<LoadInst>(V))
3553 return isa<GlobalVariable>(LI->getPointerOperand());
3554 // Two distinct allocations will never be equal.
3555 return isAllocLikeFn(V, &TLI) && V != AI;
3556}
3557
3558 /// Given a call CB which uses an address UsedV, return true if we can prove
3559 /// the call's only possible effect is storing to UsedV.
3560static bool isRemovableWrite(CallBase &CB, Value *UsedV,
3561 const TargetLibraryInfo &TLI) {
3562 if (!CB.use_empty())
3563 // TODO: add recursion if returned attribute is present
3564 return false;
3565
3566 if (CB.isTerminator())
3567 // TODO: remove implementation restriction
3568 return false;
3569
3570 if (!CB.willReturn() || !CB.doesNotThrow())
3571 return false;
3572
3573 // If the only possible side effect of the call is writing to the alloca,
3574 // and the result isn't used, we can safely remove any reads implied by the
3575 // call including those which might read the alloca itself.
3576 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
3577 return Dest && Dest->Ptr == UsedV;
3578}
3579
3580static std::optional<ModRefInfo>
isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
3582 const TargetLibraryInfo &TLI, bool KnowInit) {
SmallVector<Instruction *, 4> Worklist;
3584 const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
3585 Worklist.push_back(AI);
ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Mod;
3587
3588 do {
3589 Instruction *PI = Worklist.pop_back_val();
3590 for (User *U : PI->users()) {
Instruction *I = cast<Instruction>(U);
3592 switch (I->getOpcode()) {
3593 default:
3594 // Give up the moment we see something we can't handle.
3595 return std::nullopt;
3596
3597 case Instruction::AddrSpaceCast:
3598 case Instruction::BitCast:
3599 case Instruction::GetElementPtr:
3600 Users.emplace_back(I);
3601 Worklist.push_back(I);
3602 continue;
3603
3604 case Instruction::ICmp: {
3605 ICmpInst *ICI = cast<ICmpInst>(I);
3606 // We can fold eq/ne comparisons with null to false/true, respectively.
3607 // We also fold comparisons in some conditions provided the alloc has
3608 // not escaped (see isNeverEqualToUnescapedAlloc).
3609 if (!ICI->isEquality())
3610 return std::nullopt;
3611 unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
3612 if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
3613 return std::nullopt;
3614
3615 // Do not fold compares to aligned_alloc calls, as they may have to
3616 // return null in case the required alignment cannot be satisfied,
3617 // unless we can prove that both alignment and size are valid.
3618 auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
3619 // Check if alignment and size of a call to aligned_alloc is valid,
3620 // that is alignment is a power-of-2 and the size is a multiple of the
3621 // alignment.
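// For example (illustrative), aligned_alloc(8, 32) has a valid
// alignment/size pair, whereas aligned_alloc(8, 30) is allowed to return
// null regardless of available memory, so a comparison with null must not
// be folded away in that case.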
3622 const APInt *Alignment;
3623 const APInt *Size;
3624 return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
3625 match(CB->getArgOperand(1), m_APInt(Size)) &&
3626 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
3627 };
3628 auto *CB = dyn_cast<CallBase>(AI);
3629 LibFunc TheLibFunc;
3630 if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3631 TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3632 !AlignmentAndSizeKnownValid(CB))
3633 return std::nullopt;
3634 Users.emplace_back(I);
3635 continue;
3636 }
3637
3638 case Instruction::Call:
3639 // Ignore no-op and store intrinsics.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3641 switch (II->getIntrinsicID()) {
3642 default:
3643 return std::nullopt;
3644
3645 case Intrinsic::memmove:
3646 case Intrinsic::memcpy:
3647 case Intrinsic::memset: {
MemIntrinsic *MI = cast<MemIntrinsic>(II);
3649 if (MI->isVolatile())
3650 return std::nullopt;
3651 // Note: this could also be ModRef, but we can still interpret that
3652 // as just Mod in that case.
3653 ModRefInfo NewAccess =
3654 MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
3655 if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
3656 return std::nullopt;
3657 Access |= NewAccess;
3658 [[fallthrough]];
3659 }
3660 case Intrinsic::assume:
3661 case Intrinsic::invariant_start:
3662 case Intrinsic::invariant_end:
3663 case Intrinsic::lifetime_start:
3664 case Intrinsic::lifetime_end:
3665 case Intrinsic::objectsize:
3666 Users.emplace_back(I);
3667 continue;
3668 case Intrinsic::launder_invariant_group:
3669 case Intrinsic::strip_invariant_group:
3670 Users.emplace_back(I);
3671 Worklist.push_back(I);
3672 continue;
3673 }
3674 }
3675
3676 if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
3677 getAllocationFamily(I, &TLI) == Family) {
3678 Users.emplace_back(I);
3679 continue;
3680 }
3681
3682 if (Family && getReallocatedOperand(cast<CallBase>(I)) == PI &&
3683 getAllocationFamily(I, &TLI) == Family) {
3684 Users.emplace_back(I);
3685 Worklist.push_back(I);
3686 continue;
3687 }
3688
3689 if (!isRefSet(Access) &&
3690 isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
3692 Users.emplace_back(I);
3693 continue;
3694 }
3695
3696 return std::nullopt;
3697
3698 case Instruction::Store: {
StoreInst *SI = cast<StoreInst>(I);
3700 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3701 return std::nullopt;
3702 if (isRefSet(Access))
3703 return std::nullopt;
Access |= ModRefInfo::Mod;
3705 Users.emplace_back(I);
3706 continue;
3707 }
3708
3709 case Instruction::Load: {
3710 LoadInst *LI = cast<LoadInst>(I);
3711 if (LI->isVolatile() || LI->getPointerOperand() != PI)
3712 return std::nullopt;
3713 if (isModSet(Access))
3714 return std::nullopt;
Access |= ModRefInfo::Ref;
3716 Users.emplace_back(I);
3717 continue;
3718 }
3719 }
3720 llvm_unreachable("missing a return?");
3721 }
3722 } while (!Worklist.empty());
3723
3725 return Access;
3726}
3727
3730
Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
3731 // If we have a malloc call which is only used in any amount of comparisons to
3732 // null and free calls, delete the calls and replace the comparisons with true
3733 // or false as appropriate.
3734
3735 // This is based on the principle that we can substitute our own allocation
3736 // function (which will never return null) rather than knowledge of the
3737 // specific function being called. In some sense this can change the permitted
3738 // outputs of a program (when we convert a malloc to an alloca, the fact that
3739 // the allocation is now on the stack is potentially visible, for example),
3740 // but we believe it does so in a permissible manner.
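// For illustration, a pattern like:
//   %p = call ptr @malloc(i64 4)
//   %c = icmp eq ptr %p, null
//   call void @free(ptr %p)
// where %p has no other uses can be deleted entirely, with %c folded to
// false.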
SmallVector<WeakTrackingVH, 64> Users;
3742
3743 // If we are removing an alloca with a dbg.declare, insert dbg.value calls
3744 // before each store.
SmallVector<DbgVariableRecord *, 8> DVRs;
3746 std::unique_ptr<DIBuilder> DIB;
3747 if (isa<AllocaInst>(MI)) {
3748 findDbgUsers(&MI, DVRs);
3749 DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
3750 }
3751
3752 // Determine what getInitialValueOfAllocation would return without actually
3753 // allocating the result.
3754 bool KnowInitUndef = false;
3755 bool KnowInitZero = false;
3756 Constant *Init =
getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
3758 if (Init) {
3759 if (isa<UndefValue>(Init))
3760 KnowInitUndef = true;
3761 else if (Init->isNullValue())
3762 KnowInitZero = true;
3763 }
3764 // The various sanitizers don't actually return undef memory, but rather
3765 // memory initialized with special forms of runtime poison
3766 auto &F = *MI.getFunction();
3767 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
3768 F.hasFnAttribute(Attribute::SanitizeAddress))
3769 KnowInitUndef = false;
3770
3771 auto Removable =
3772 isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
3773 if (Removable) {
3774 for (WeakTrackingVH &User : Users) {
3775 // Lowering all @llvm.objectsize and MTI calls first because they may use
3776 // a bitcast/GEP of the alloca we are removing.
3777 if (!User)
3778 continue;
3779
3781
Instruction *I = cast<Instruction>(&*User);
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3783 if (II->getIntrinsicID() == Intrinsic::objectsize) {
3784 SmallVector<Instruction *> InsertedInstructions;
3785 Value *Result = lowerObjectSizeCall(
3786 II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
3787 for (Instruction *Inserted : InsertedInstructions)
3788 Worklist.add(Inserted);
3789 replaceInstUsesWith(*I, Result);
eraseInstFromFunction(*I);
3791 User = nullptr; // Skip examining in the next loop.
3792 continue;
3793 }
3794 if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
3795 if (KnowInitZero && isRefSet(*Removable)) {
3797 Builder.SetInsertPoint(MTI);
3798 auto *M = Builder.CreateMemSet(
3799 MTI->getRawDest(),
3800 ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
3801 MTI->getLength(), MTI->getDestAlign());
3802 M->copyMetadata(*MTI);
3803 }
3804 }
3805 }
3806 }
3807 for (WeakTrackingVH &User : Users) {
3808 if (!User)
3809 continue;
3810
3812
Instruction *I = cast<Instruction>(&*User);
3813 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
replaceInstUsesWith(*C,
3815 ConstantInt::get(Type::getInt1Ty(C->getContext()),
3816 C->isFalseWhenEqual()));
3817 } else if (auto *SI = dyn_cast<StoreInst>(I)) {
3818 for (auto *DVR : DVRs)
3819 if (DVR->isAddressOfVariable())
ConvertDebugDeclareToDebugValue(DVR, SI, *DIB);
3821 } else {
3822 // Casts, GEP, or anything else: we're about to delete this instruction,
3823 // so it can not have any valid uses.
3824 Constant *Replace;
3825 if (isa<LoadInst>(I)) {
3826 assert(KnowInitZero || KnowInitUndef);
3827 Replace = KnowInitUndef ? UndefValue::get(I->getType())
3828 : Constant::getNullValue(I->getType());
3829 } else
3830 Replace = PoisonValue::get(I->getType());
3831 replaceInstUsesWith(*I, Replace);
3832 }
eraseInstFromFunction(*I);
3834 }
3835
if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
3837 // Replace invoke with a NOP intrinsic to maintain the original CFG
3838 Module *M = II->getModule();
3839 Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
3840 auto *NewII = InvokeInst::Create(
3841 F, II->getNormalDest(), II->getUnwindDest(), {}, "", II->getParent());
3842 NewII->setDebugLoc(II->getDebugLoc());
3843 }
3844
3845 // Remove debug intrinsics which describe the value contained within the
3846 // alloca. In addition to removing dbg.{declare,addr} which simply point to
3847 // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
3848 //
3849 // ```
3850 // define void @foo(i32 %0) {
3851 // %a = alloca i32 ; Deleted.
3852 // store i32 %0, i32* %a
3853 // dbg.value(i32 %0, "arg0") ; Not deleted.
3854 // dbg.value(i32* %a, "arg0", DW_OP_deref) ; Deleted.
3855 // call void @trivially_inlinable_no_op(i32* %a)
3856 // ret void
3857 // }
3858 // ```
3859 //
3860 // This may not be required if we stop describing the contents of allocas
3861 // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
3862 // the LowerDbgDeclare utility.
3863 //
3864 // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
3865 // "arg0" dbg.value may be stale after the call. However, failing to remove
3866 // the DW_OP_deref dbg.value causes large gaps in location coverage.
3867 //
3868 // FIXME: the Assignment Tracking project has now likely made this
3869 // redundant (and it's sometimes harmful).
3870 for (auto *DVR : DVRs)
3871 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3872 DVR->eraseFromParent();
3873
3874 return eraseInstFromFunction(MI);
3875 }
3876 return nullptr;
3877}
3878
3879/// Move the call to free before a NULL test.
3880///
3881 /// Check if this free is accessed after its argument has been tested
3882 /// against NULL (property 0).
3883/// If yes, it is legal to move this call in its predecessor block.
3884///
3885/// The move is performed only if the block containing the call to free
3886/// will be removed, i.e.:
3887/// 1. it has only one predecessor P, and P has two successors
3888/// 2. it contains the call, noops, and an unconditional branch
3889/// 3. its successor is the same as its predecessor's successor
3890///
3891 /// Profitability is not a concern here; this function should
3892 /// be called only if the caller knows this transformation would be
3893 /// profitable (e.g., for code size).
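///
/// For illustration, a CFG shape like:
///   entry:
///     %cmp = icmp eq ptr %p, null
///     br i1 %cmp, label %exit, label %free.bb
///   free.bb:
///     call void @free(ptr %p)
///     br label %exit
/// has the free call moved into 'entry', after which SimplifyCFG can remove
/// the now-empty 'free.bb'.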
static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
3895 const DataLayout &DL) {
3896 Value *Op = FI.getArgOperand(0);
3897 BasicBlock *FreeInstrBB = FI.getParent();
3898 BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
3899
3900 // Validate part of constraint #1: Only one predecessor
3901 // FIXME: We can extend the number of predecessors, but in that case, we
3902 // would duplicate the call to free in each predecessor and it may
3903 // not be profitable even for code size.
3904 if (!PredBB)
3905 return nullptr;
3906
3907 // Validate constraint #2: Does this block contain only the call to
3908 // free, noops, and an unconditional branch?
3909 BasicBlock *SuccBB;
3910 Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
3911 if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
3912 return nullptr;
3913
3914 // If there are only 2 instructions in the block, at this point,
3915 // they are the call to free and the unconditional branch.
3916 // If there are more than 2 instructions, check that they are noops
3917 // i.e., they won't hurt the performance of the generated code.
3918 if (FreeInstrBB->size() != 2) {
3919 for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
3920 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3921 continue;
3922 auto *Cast = dyn_cast<CastInst>(&Inst);
3923 if (!Cast || !Cast->isNoopCast(DL))
3924 return nullptr;
3925 }
3926 }
3927 // Validate the rest of constraint #1 by matching on the pred branch.
3928 Instruction *TI = PredBB->getTerminator();
3929 BasicBlock *TrueBB, *FalseBB;
3930 CmpPredicate Pred;
3931 if (!match(TI, m_Br(m_ICmp(Pred,
m_CombineOr(m_Specific(Op),
3933 m_Specific(Op->stripPointerCasts())),
3934 m_Zero()),
3935 TrueBB, FalseBB)))
3936 return nullptr;
3937 if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
3938 return nullptr;
3939
3940 // Validate constraint #3: Ensure the null case just falls through.
3941 if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
3942 return nullptr;
3943 assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
3944 "Broken CFG: missing edge from predecessor to successor");
3945
3946 // At this point, we know that everything in FreeInstrBB can be moved
3947 // before TI.
3948 for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
3949 if (&Instr == FreeInstrBBTerminator)
3950 break;
3951 Instr.moveBeforePreserving(TI->getIterator());
3952 }
3953 assert(FreeInstrBB->size() == 1 &&
3954 "Only the branch instruction should remain");
3955
3956 // Now that we've moved the call to free before the NULL check, we have to
3957 // remove any attributes on its parameter that imply it's non-null, because
3958 // those attributes might have only been valid because of the NULL check, and
3959 // we can get miscompiles if we keep them. This is conservative if non-null is
3960 // also implied by something other than the NULL check, but it's guaranteed to
3961 // be correct, and the conservativeness won't matter in practice, since the
3962 // attributes are irrelevant for the call to free itself and the pointer
3963 // shouldn't be used after the call.
3964 AttributeList Attrs = FI.getAttributes();
3965 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
3966 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3967 if (Dereferenceable.isValid()) {
3968 uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
3969 Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
3970 Attribute::Dereferenceable);
3971 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
3972 }
3973 FI.setAttributes(Attrs);
3974
3975 return &FI;
3976}
3977
Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {
3979 // free undef -> unreachable.
3980 if (isa<UndefValue>(Op)) {
3981 // Leave a marker since we can't modify the CFG here.
CreateNonTerminatorUnreachable(&FI);
3983 return eraseInstFromFunction(FI);
3984 }
3985
3986 // If we have 'free null' delete the instruction. This can happen in stl code
3987 // when lots of inlining happens.
if (isa<ConstantPointerNull>(Op))
3989 return eraseInstFromFunction(FI);
3990
3991 // If we had free(realloc(...)) with no intervening uses, then eliminate the
3992 // realloc() entirely.
CallInst *CI = dyn_cast<CallInst>(Op);
3994 if (CI && CI->hasOneUse())
3995 if (Value *ReallocatedOp = getReallocatedOperand(CI))
3996 return eraseInstFromFunction(*replaceInstUsesWith(*CI, ReallocatedOp));
3997
3998 // If we optimize for code size, try to move the call to free before the null
3999 // test so that SimplifyCFG can remove the empty block and dead code
4000 // elimination can remove the branch. I.e., this helps to turn something like:
4001 // if (foo) free(foo);
4002 // into
4003 // free(foo);
4004 //
4005 // Note that we can only do this for 'free' and not for any flavor of
4006 // 'operator delete'; there is no 'operator delete' symbol for which we are
4007 // permitted to invent a call, even if we're passing in a null pointer.
4008 if (MinimizeSize) {
4009 LibFunc Func;
4010 if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
4012 return I;
4013 }
4014
4015 return nullptr;
4016}
4017
Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
4019 Value *RetVal = RI.getReturnValue();
4020 if (!RetVal)
4021 return nullptr;
4022
4023 Function *F = RI.getFunction();
4024 Type *RetTy = RetVal->getType();
4025 if (RetTy->isPointerTy()) {
4026 bool HasDereferenceable =
4027 F->getAttributes().getRetDereferenceableBytes() > 0;
4028 if (F->hasRetAttribute(Attribute::NonNull) ||
4029 (HasDereferenceable &&
4031 if (Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4032 return replaceOperand(RI, 0, V);
4033 }
4034 }
4035
4036 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4037 return nullptr;
4038
4039 FPClassTest ReturnClass = F->getAttributes().getRetNoFPClass();
4040 if (ReturnClass == fcNone)
4041 return nullptr;
4042
4043 KnownFPClass KnownClass;
4044 Value *Simplified =
4045 SimplifyDemandedUseFPClass(RetVal, ~ReturnClass, KnownClass, &RI);
4046 if (!Simplified)
4047 return nullptr;
4048
4049 return ReturnInst::Create(RI.getContext(), Simplified);
4050}
4051
4052// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {
4054 // Try to remove the previous instruction if it must lead to unreachable.
4055 // This includes instructions like stores and "llvm.assume" that may not get
4056 // removed by simple dead code elimination.
4057 bool Changed = false;
4058 while (Instruction *Prev = I.getPrevNode()) {
4059 // While we theoretically can erase EH, that would result in a block that
4060 // used to start with an EH no longer starting with EH, which is invalid.
4061 // To make it valid, we'd need to fixup predecessors to no longer refer to
4062 // this block, but that changes CFG, which is not allowed in InstCombine.
4063 if (Prev->isEHPad())
4064 break; // Can not drop any more instructions. We're done here.
4065
if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
4067 break; // Can not drop any more instructions. We're done here.
4068 // Otherwise, this instruction can be freely erased,
4069 // even if it is not side-effect free.
4070
4071 // A value may still have uses before we process it here (for example, in
4072 // another unreachable block), so convert those to poison.
4073 replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
4074 eraseInstFromFunction(*Prev);
4075 Changed = true;
4076 }
4077 return Changed;
4078}
4079
4084
Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
removeInstructionsBeforeUnreachable(I);
return nullptr;
}

Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
4086 assert(BI.isUnconditional() && "Only for unconditional branches.");
4087
4088 // If this store is the second-to-last instruction in the basic block
4089 // (excluding debug info) and if the block ends with
4090 // an unconditional branch, try to move the store to the successor block.
4091
4092 auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
4093 BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
4094 do {
4095 if (BBI != FirstInstr)
4096 --BBI;
4097 } while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4098
4099 return dyn_cast<StoreInst>(BBI);
4100 };
4101
4102 if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
4104 return &BI;
4105
4106 return nullptr;
4107}
4108
4111 if (!DeadEdges.insert({From, To}).second)
4112 return;
4113
4114 // Replace phi node operands in successor with poison.
4115 for (PHINode &PN : To->phis())
4116 for (Use &U : PN.incoming_values())
4117 if (PN.getIncomingBlock(U) == From && !isa<PoisonValue>(U)) {
4118 replaceUse(U, PoisonValue::get(PN.getType()));
4119 addToWorklist(&PN);
4120 MadeIRChange = true;
4121 }
4122
4123 Worklist.push_back(To);
4124}
4125
4126// Under the assumption that I is unreachable, remove it and following
4127// instructions. Changes are reported directly to MadeIRChange.
void InstCombinerImpl::handleUnreachableFrom(
Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {
4130 BasicBlock *BB = I->getParent();
4131 for (Instruction &Inst : make_early_inc_range(
4132 make_range(std::next(BB->getTerminator()->getReverseIterator()),
4133 std::next(I->getReverseIterator())))) {
4134 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4135 replaceInstUsesWith(Inst, PoisonValue::get(Inst.getType()));
4136 MadeIRChange = true;
4137 }
4138 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4139 continue;
4140 // RemoveDIs: erase debug-info on this instruction manually.
4141 Inst.dropDbgRecords();
eraseInstFromFunction(Inst);
4143 MadeIRChange = true;
4144 }
4145
SmallVector<Value *> Changed;
if (handleUnreachableTerminator(BB->getTerminator(), Changed)) {
4148 MadeIRChange = true;
4149 for (Value *V : Changed)
addToWorklist(cast<Instruction>(V));
4151 }
4152
4153 // Handle potentially dead successors.
4154 for (BasicBlock *Succ : successors(BB))
4155 addDeadEdge(BB, Succ, Worklist);
4156}
4157
void InstCombinerImpl::handlePotentiallyDeadBlocks(
SmallVectorImpl<BasicBlock *> &Worklist) {
4160 while (!Worklist.empty()) {
4161 BasicBlock *BB = Worklist.pop_back_val();
4162 if (!all_of(predecessors(BB), [&](BasicBlock *Pred) {
4163 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
4164 }))
4165 continue;
4166
handleUnreachableFrom(&BB->front(), Worklist);
4168 }
4169}
4170
void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
4172 BasicBlock *LiveSucc) {
SmallVector<BasicBlock *> Worklist;
4174 for (BasicBlock *Succ : successors(BB)) {
4175 // The live successor isn't dead.
4176 if (Succ == LiveSucc)
4177 continue;
4178
4179 addDeadEdge(BB, Succ, Worklist);
4180 }
4181
4183}
4184
Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
4186 if (BI.isUnconditional())
return visitUnconditionalBranchInst(BI);
4188
4189 // Change br (not X), label True, label False to: br X, label False, True
4190 Value *Cond = BI.getCondition();
4191 Value *X;
4192 if (match(Cond, m_Not(m_Value(X))) && !isa<Constant>(X)) {
4193 // Swap Destinations and condition...
4194 BI.swapSuccessors();
4195 if (BPI)
4196 BPI->swapSuccEdgesProbabilities(BI.getParent());
4197 return replaceOperand(BI, 0, X);
4198 }
4199
4200 // Canonicalize logical-and-with-invert as logical-or-with-invert.
4201 // This is done by inverting the condition and swapping successors:
4202 // br (X && !Y), T, F --> br !(X && !Y), F, T --> br (!X || Y), F, T
4203 Value *Y;
4204 if (isa<SelectInst>(Cond) &&
4205 match(Cond,
4207 Value *NotX = Builder.CreateNot(X, "not." + X->getName());
4208 Value *Or = Builder.CreateLogicalOr(NotX, Y);
4209 BI.swapSuccessors();
4210 if (BPI)
4211 BPI->swapSuccEdgesProbabilities(BI.getParent());
4212 return replaceOperand(BI, 0, Or);
4213 }
4214
4215 // If the condition is irrelevant, remove the use so that other
4216 // transforms on the condition become more effective.
4217 if (!isa<ConstantInt>(Cond) && BI.getSuccessor(0) == BI.getSuccessor(1))
4218 return replaceOperand(BI, 0, ConstantInt::getFalse(Cond->getType()));
4219
4220 // Canonicalize, for example, fcmp_one -> fcmp_oeq.
4221 CmpPredicate Pred;
4222 if (match(Cond, m_OneUse(m_FCmp(Pred, m_Value(), m_Value()))) &&
4223 !isCanonicalPredicate(Pred)) {
4224 // Swap destinations and condition.
4225 auto *Cmp = cast<CmpInst>(Cond);
4226 Cmp->setPredicate(CmpInst::getInversePredicate(Pred));
4227 BI.swapSuccessors();
4228 if (BPI)
4229 BPI->swapSuccEdgesProbabilities(BI.getParent());
4230 Worklist.push(Cmp);
4231 return &BI;
4232 }
4233
4234 if (isa<UndefValue>(Cond)) {
4235 handlePotentiallyDeadSuccessors(BI.getParent(), /*LiveSucc*/ nullptr);
4236 return nullptr;
4237 }
4238 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
handlePotentiallyDeadSuccessors(BI.getParent(),
4240 BI.getSuccessor(!CI->getZExtValue()));
4241 return nullptr;
4242 }
4243
4244 // Replace all dominated uses of the condition with true/false
4245 // Ignore constant expressions to avoid iterating over uses on other
4246 // functions.
4247 if (!isa<Constant>(Cond) && BI.getSuccessor(0) != BI.getSuccessor(1)) {
4248 for (auto &U : make_early_inc_range(Cond->uses())) {
4249 BasicBlockEdge Edge0(BI.getParent(), BI.getSuccessor(0));
4250 if (DT.dominates(Edge0, U)) {
4251 replaceUse(U, ConstantInt::getTrue(Cond->getType()));
4252 addToWorklist(cast<Instruction>(U.getUser()));
4253 continue;
4254 }
4255 BasicBlockEdge Edge1(BI.getParent(), BI.getSuccessor(1));
4256 if (DT.dominates(Edge1, U)) {
4257 replaceUse(U, ConstantInt::getFalse(Cond->getType()));
4258 addToWorklist(cast<Instruction>(U.getUser()));
4259 }
4260 }
4261 }
4262
4263 DC.registerBranch(&BI);
4264 return nullptr;
4265}
4266
4267// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
4268// we can prove that both (switch C) and (switch X) go to the default when cond
4269 // is false/true.
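// For example (illustrative):
//   %cond = icmp ult i32 %x, 4
//   %sel = select i1 %cond, i32 %x, i32 7
//   switch i32 %sel, label %default [ cases 0, 1, 2 ... ]
// Since 7 hits the default and every case value lies in [0, 4), the switch
// can be performed on %x directly.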
static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
SelectInst *Select,
4272 bool IsTrueArm) {
4273 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4274 auto *C = dyn_cast<ConstantInt>(Select->getOperand(CstOpIdx));
4275 if (!C)
4276 return nullptr;
4277
4278 BasicBlock *CstBB = SI.findCaseValue(C)->getCaseSuccessor();
4279 if (CstBB != SI.getDefaultDest())
4280 return nullptr;
4281 Value *X = Select->getOperand(3 - CstOpIdx);
4282 CmpPredicate Pred;
4283 const APInt *RHSC;
4284 if (!match(Select->getCondition(),
4285 m_ICmp(Pred, m_Specific(X), m_APInt(RHSC))))
4286 return nullptr;
4287 if (IsTrueArm)
4288 Pred = ICmpInst::getInversePredicate(Pred);
4289
4290 // See whether we can replace the select with X
ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
4292 for (auto Case : SI.cases())
4293 if (!CR.contains(Case.getCaseValue()->getValue()))
4294 return nullptr;
4295
4296 return X;
4297}
4298
Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
4300 Value *Cond = SI.getCondition();
4301 Value *Op0;
4302 ConstantInt *AddRHS;
4303 if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
4304 // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
4305 for (auto Case : SI.cases()) {
4306 Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
4307 assert(isa<ConstantInt>(NewCase) &&
4308 "Result of expression should be constant");
4309 Case.setValue(cast<ConstantInt>(NewCase));
4310 }
4311 return replaceOperand(SI, 0, Op0);
4312 }
4313
4314 ConstantInt *SubLHS;
4315 if (match(Cond, m_Sub(m_ConstantInt(SubLHS), m_Value(Op0)))) {
4316 // Change 'switch (1-X) case 1:' into 'switch (X) case 0'.
4317 for (auto Case : SI.cases()) {
4318 Constant *NewCase = ConstantExpr::getSub(SubLHS, Case.getCaseValue());
4319 assert(isa<ConstantInt>(NewCase) &&
4320 "Result of expression should be constant");
4321 Case.setValue(cast<ConstantInt>(NewCase));
4322 }
4323 return replaceOperand(SI, 0, Op0);
4324 }
4325
4326 uint64_t ShiftAmt;
4327 if (match(Cond, m_Shl(m_Value(Op0), m_ConstantInt(ShiftAmt))) &&
4328 ShiftAmt < Op0->getType()->getScalarSizeInBits() &&
4329 all_of(SI.cases(), [&](const auto &Case) {
4330 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4331 })) {
4332 // Change 'switch (X << 2) case 4:' into 'switch (X) case 1:'.
4334 if (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap() ||
4335 Shl->hasOneUse()) {
4336 Value *NewCond = Op0;
4337 if (!Shl->hasNoUnsignedWrap() && !Shl->hasNoSignedWrap()) {
4338 // If the shift may wrap, we need to mask off the shifted bits.
4339 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4340 NewCond = Builder.CreateAnd(
4341 Op0, APInt::getLowBitsSet(BitWidth, BitWidth - ShiftAmt));
4342 }
4343 for (auto Case : SI.cases()) {
4344 const APInt &CaseVal = Case.getCaseValue()->getValue();
4345 APInt ShiftedCase = Shl->hasNoSignedWrap() ? CaseVal.ashr(ShiftAmt)
4346 : CaseVal.lshr(ShiftAmt);
4347 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
4348 }
4349 return replaceOperand(SI, 0, NewCond);
4350 }
4351 }
4352
4353 // Fold switch(zext/sext(X)) into switch(X) if possible.
4354 if (match(Cond, m_ZExtOrSExt(m_Value(Op0)))) {
4355 bool IsZExt = isa<ZExtInst>(Cond);
4356 Type *SrcTy = Op0->getType();
4357 unsigned NewWidth = SrcTy->getScalarSizeInBits();
4358
4359 if (all_of(SI.cases(), [&](const auto &Case) {
4360 const APInt &CaseVal = Case.getCaseValue()->getValue();
4361 return IsZExt ? CaseVal.isIntN(NewWidth)
4362 : CaseVal.isSignedIntN(NewWidth);
4363 })) {
4364 for (auto &Case : SI.cases()) {
4365 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4366 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4367 }
4368 return replaceOperand(SI, 0, Op0);
4369 }
4370 }
4371
4372 // Fold switch(select cond, X, Y) into switch(X/Y) if possible
4373 if (auto *Select = dyn_cast<SelectInst>(Cond)) {
4374 if (Value *V =
4375 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/true))
4376 return replaceOperand(SI, 0, V);
4377 if (Value *V =
4378 simplifySwitchOnSelectUsingRanges(SI, Select, /*IsTrueArm=*/false))
4379 return replaceOperand(SI, 0, V);
4380 }
4381
4382 KnownBits Known = computeKnownBits(Cond, &SI);
4383 unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
4384 unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
4385
4386 // Compute the number of leading bits we can ignore.
4387 // TODO: A better way to determine this would use ComputeNumSignBits().
4388 for (const auto &C : SI.cases()) {
4389 LeadingKnownZeros =
4390 std::min(LeadingKnownZeros, C.getCaseValue()->getValue().countl_zero());
4391 LeadingKnownOnes =
4392 std::min(LeadingKnownOnes, C.getCaseValue()->getValue().countl_one());
4393 }
4394
4395 unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4396
4397 // Shrink the condition operand if the new type is smaller than the old type.
4398 // But do not shrink to a non-standard type, because backend can't generate
4399 // good code for that yet.
4400 // TODO: We can make it aggressive again after fixing PR39569.
4401 if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
4402 shouldChangeType(Known.getBitWidth(), NewWidth)) {
4403 IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
4404 Builder.SetInsertPoint(&SI);
4405 Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
4406
4407 for (auto Case : SI.cases()) {
4408 APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
4409 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
4410 }
4411 return replaceOperand(SI, 0, NewCond);
4412 }
4413
4414 if (isa<UndefValue>(Cond)) {
4415 handlePotentiallyDeadSuccessors(SI.getParent(), /*LiveSucc*/ nullptr);
4416 return nullptr;
4417 }
4418 if (auto *CI = dyn_cast<ConstantInt>(Cond)) {
handlePotentiallyDeadSuccessors(SI.getParent(),
4420 SI.findCaseValue(CI)->getCaseSuccessor());
4421 return nullptr;
4422 }
4423
4424 return nullptr;
4425}
4426
Instruction *
4428 InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {
auto *WO = dyn_cast<WithOverflowInst>(EV.getAggregateOperand());
4430 if (!WO)
4431 return nullptr;
4432
4433 Intrinsic::ID OvID = WO->getIntrinsicID();
4434 const APInt *C = nullptr;
4435 if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
4436 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4437 OvID == Intrinsic::umul_with_overflow)) {
4438 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
4439 if (C->isAllOnes())
4440 return BinaryOperator::CreateNeg(WO->getLHS());
4441 // extractvalue (any_mul_with_overflow X, 2^n), 0 --> X << n
4442 if (C->isPowerOf2()) {
4443 return BinaryOperator::CreateShl(
4444 WO->getLHS(),
4445 ConstantInt::get(WO->getLHS()->getType(), C->logBase2()));
4446 }
4447 }
4448 }
4449
4450 // We're extracting from an overflow intrinsic. See if we're the only user.
4451 // That allows us to simplify multiple result intrinsics to simpler things
4452 // that just get one value.
4453 if (!WO->hasOneUse())
4454 return nullptr;
4455
4456 // Check if we're grabbing only the result of a 'with overflow' intrinsic
4457 // and replace it with a traditional binary instruction.
4458 if (*EV.idx_begin() == 0) {
4459 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4460 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
4461 // Replace the old instruction's uses with poison.
4462 replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
eraseInstFromFunction(*WO);
4464 return BinaryOperator::Create(BinOp, LHS, RHS);
4465 }
4466
4467 assert(*EV.idx_begin() == 1 && "Unexpected extract index for overflow inst");
4468
4469 // (usub LHS, RHS) overflows when LHS is unsigned-less-than RHS.
4470 if (OvID == Intrinsic::usub_with_overflow)
4471 return new ICmpInst(ICmpInst::ICMP_ULT, WO->getLHS(), WO->getRHS());
4472
4473 // smul with i1 types overflows when both sides are set: -1 * -1 == +1, but
4474 // +1 is not possible because we assume signed values.
4475 if (OvID == Intrinsic::smul_with_overflow &&
4476 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4477 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4478
4479 // extractvalue (umul_with_overflow X, X), 1 -> X u> 2^(N/2)-1
4480 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4481 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4482 // Only handle even bitwidths for performance reasons.
4483 if (BitWidth % 2 == 0)
4484 return new ICmpInst(
4485 ICmpInst::ICMP_UGT, WO->getLHS(),
4486 ConstantInt::get(WO->getLHS()->getType(),
APInt::getLowBitsSet(BitWidth, BitWidth / 2)));
4488 }
4489
4490 // If only the overflow result is used, and the right hand side is a
4491 // constant (or constant splat), we can remove the intrinsic by directly
4492 // checking for overflow.
4493 if (C) {
4494 // Compute the no-wrap range for LHS given RHS=C, then construct an
4495 // equivalent icmp, potentially using an offset.
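// For example (illustrative):
//   %r = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 200)
//   %ov = extractvalue { i8, i1 } %r, 1
// overflows exactly when %x is unsigned-greater than 55, so %ov can be
// replaced by that comparison.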
4496 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
4497 WO->getBinaryOp(), *C, WO->getNoWrapKind());
4498
4499 CmpInst::Predicate Pred;
4500 APInt NewRHSC, Offset;
4501 NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
4502 auto *OpTy = WO->getRHS()->getType();
4503 auto *NewLHS = WO->getLHS();
4504 if (Offset != 0)
4505 NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
4506 return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
4507 ConstantInt::get(OpTy, NewRHSC));
4508 }
4509
4510 return nullptr;
4511}
4512
static Value *foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall,
SelectInst *SelectInst,
4515 InstCombiner::BuilderTy &Builder) {
4516 // Helper to fold frexp of select to select of frexp.
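// For example (illustrative), with one constant arm:
//   %s = select i1 %c, double 4.0, double %x
//   %r = call { double, i32 } @llvm.frexp.f64.i32(double %s)
//   %m = extractvalue { double, i32 } %r, 0
// the mantissa can instead be computed as
//   select i1 %c, double 0.5, double (mantissa of frexp(%x))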
4517
4518 if (!SelectInst->hasOneUse() || !FrexpCall->hasOneUse())
4519 return nullptr;
Value *Cond = SelectInst->getCondition();
4521 Value *TrueVal = SelectInst->getTrueValue();
4522 Value *FalseVal = SelectInst->getFalseValue();
4523
4524 const APFloat *ConstVal = nullptr;
4525 Value *VarOp = nullptr;
4526 bool ConstIsTrue = false;
4527
4528 if (match(TrueVal, m_APFloat(ConstVal))) {
4529 VarOp = FalseVal;
4530 ConstIsTrue = true;
4531 } else if (match(FalseVal, m_APFloat(ConstVal))) {
4532 VarOp = TrueVal;
4533 ConstIsTrue = false;
4534 } else {
4535 return nullptr;
4536 }
4537
4538 Builder.SetInsertPoint(&EV);
4539
4540 CallInst *NewFrexp =
4541 Builder.CreateCall(FrexpCall->getCalledFunction(), {VarOp}, "frexp");
4542 NewFrexp->copyIRFlags(FrexpCall);
4543
4544 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0, "mantissa");
4545
4546 int Exp;
4547 APFloat Mantissa = frexp(*ConstVal, Exp, APFloat::rmNearestTiesToEven);
4548
4549 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4550
4551 Value *NewSel = Builder.CreateSelectFMF(
4552 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4553 ConstIsTrue ? NewEV : ConstantMantissa, SelectInst, "select.frexp");
4554 return NewSel;
4555}
Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
4557 Value *Agg = EV.getAggregateOperand();
4558
4559 if (!EV.hasIndices())
4560 return replaceInstUsesWith(EV, Agg);
4561
4562 if (Value *V = simplifyExtractValueInst(Agg, EV.getIndices(),
4563 SQ.getWithInstruction(&EV)))
4564 return replaceInstUsesWith(EV, V);
4565
4566 Value *Cond, *TrueVal, *FalseVal;
4568 m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal)))))) {
4569 auto *SelInst =
4570 cast<SelectInst>(cast<IntrinsicInst>(Agg)->getArgOperand(0));
4571 if (Value *Result =
4572 foldFrexpOfSelect(EV, cast<IntrinsicInst>(Agg), SelInst, Builder))
4573 return replaceInstUsesWith(EV, Result);
4574 }
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4576 // We're extracting from an insertvalue instruction, compare the indices
4577 const unsigned *exti, *exte, *insi, *inse;
4578 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4579 exte = EV.idx_end(), inse = IV->idx_end();
4580 exti != exte && insi != inse;
4581 ++exti, ++insi) {
4582 if (*insi != *exti)
4583 // The insert and extract both reference distinctly different elements.
4584 // This means the extract is not influenced by the insert, and we can
4585 // replace the aggregate operand of the extract with the aggregate
4586 // operand of the insert. i.e., replace
4587 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4588 // %E = extractvalue { i32, { i32 } } %I, 0
4589 // with
4590 // %E = extractvalue { i32, { i32 } } %A, 0
4591 return ExtractValueInst::Create(IV->getAggregateOperand(),
4592 EV.getIndices());
4593 }
4594 if (exti == exte && insi == inse)
4595 // Both iterators are at the end: Index lists are identical. Replace
4596 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4597 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4598 // with "i32 42"
4599 return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
4600 if (exti == exte) {
4601 // The extract list is a prefix of the insert list. i.e. replace
4602 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4603 // %E = extractvalue { i32, { i32 } } %I, 1
4604 // with
4605 // %X = extractvalue { i32, { i32 } } %A, 1
4606 // %E = insertvalue { i32 } %X, i32 42, 0
4607 // by switching the order of the insert and extract (though the
4608 // insertvalue should be left in, since it may have other uses).
4609 Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
4610 EV.getIndices());
4611 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4612 ArrayRef(insi, inse));
4613 }
4614 if (insi == inse)
4615 // The insert list is a prefix of the extract list
4616 // We can simply remove the common indices from the extract and make it
4617 // operate on the inserted value instead of the insertvalue result.
4618 // i.e., replace
4619 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4620 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4621 // with
4622 // %E extractvalue { i32 } { i32 42 }, 0
4623 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4624 ArrayRef(exti, exte));
4625 }
4626
4627 if (Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4628 return R;
4629
4630 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4631 // Bail out if the aggregate contains scalable vector type
4632 if (auto *STy = dyn_cast<StructType>(Agg->getType());
4633 STy && STy->isScalableTy())
4634 return nullptr;
4635
4636 // If the (non-volatile) load only has one use, we can rewrite this to a
4637 // load from a GEP. This reduces the size of the load. If a load is used
4638 // only by extractvalue instructions then this either must have been
4639 // optimized before, or it is a struct with padding, in which case we
4640 // don't want to do the transformation as it loses padding knowledge.
4641 if (L->isSimple() && L->hasOneUse()) {
4642 // extractvalue has integer indices, getelementptr has Value*s. Convert.
4643 SmallVector<Value*, 4> Indices;
4644 // Prefix an i32 0 since we need the first element.
4645 Indices.push_back(Builder.getInt32(0));
4646 for (unsigned Idx : EV.indices())
4647 Indices.push_back(Builder.getInt32(Idx));
4648
4649 // We need to insert these at the location of the old load, not at that of
4650 // the extractvalue.
4651 Builder.SetInsertPoint(L);
4652 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
4653 L->getPointerOperand(), Indices);
4654 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
4655 // Whatever aliasing information we had for the original load must also
4656 // hold for the smaller load, so propagate the annotations.
4657 NL->setAAMetadata(L->getAAMetadata());
4658 // Returning the load directly will cause the main loop to insert it in
4659 // the wrong spot, so use replaceInstUsesWith().
4660 return replaceInstUsesWith(EV, NL);
4661 }
4662 }
4663
4664 if (auto *PN = dyn_cast<PHINode>(Agg))
4665 if (Instruction *Res = foldOpIntoPhi(EV, PN))
4666 return Res;
4667
4668 // Canonicalize extract (select Cond, TV, FV)
4669 // -> select cond, (extract TV), (extract FV)
4670 if (auto *SI = dyn_cast<SelectInst>(Agg))
4671 if (Instruction *R = FoldOpIntoSelect(EV, SI, /*FoldWithMultiUse=*/true))
4672 return R;
4673
4674 // We could simplify extracts from other values. Note that nested extracts may
4675 // already be simplified implicitly by the above: extract (extract (insert) )
4676 // will be translated into extract ( insert ( extract ) ) first and then just
4677 // the value inserted, if appropriate. Similarly for extracts from single-use
4678 // loads: extract (extract (load)) will be translated to extract (load (gep))
4679 // and if again single-use then via load (gep (gep)) to load (gep).
4680 // However, double extracts from e.g. function arguments or return values
4681 // aren't handled yet.
4682 return nullptr;
4683}
4684
4685/// Return 'true' if the given typeinfo will match anything.
4686static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
4687 switch (Personality) {
case EHPersonality::GNU_C:
case EHPersonality::GNU_C_SjLj:
case EHPersonality::Rust:
4691 // The GCC C EH and Rust personality only exists to support cleanups, so
4692 // it's not clear what the semantics of catch clauses are.
4693 return false;
case EHPersonality::Unknown:
4695 return false;
case EHPersonality::GNU_Ada:
4697 // While __gnat_all_others_value will match any Ada exception, it doesn't
4698 // match foreign exceptions (or didn't, before gcc-4.7).
4699 return false;
case EHPersonality::GNU_CXX:
case EHPersonality::GNU_CXX_SjLj:
case EHPersonality::GNU_ObjC:
case EHPersonality::MSVC_X86SEH:
case EHPersonality::MSVC_TableSEH:
case EHPersonality::MSVC_CXX:
case EHPersonality::CoreCLR:
case EHPersonality::Wasm_CXX:
case EHPersonality::XL_CXX:
case EHPersonality::ZOS_CXX:
4710 return TypeInfo->isNullValue();
4711 }
4712 llvm_unreachable("invalid enum");
4713}
4714
4715static bool shorter_filter(const Value *LHS, const Value *RHS) {
4716 return
4717 cast<ArrayType>(LHS->getType())->getNumElements()
4718 <
4719 cast<ArrayType>(RHS->getType())->getNumElements();
4720}
4721
4723 // The logic here should be correct for any real-world personality function.
4724 // However if that turns out not to be true, the offending logic can always
4725 // be conditioned on the personality function, like the catch-all logic is.
4726 EHPersonality Personality =
4727 classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
4728
4729 // Simplify the list of clauses, eg by removing repeated catch clauses
4730 // (these are often created by inlining).
4731 bool MakeNewInstruction = false; // If true, recreate using the following:
4732 SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
4733 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
4734
4735 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
4736 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
4737 bool isLastClause = i + 1 == e;
4738 if (LI.isCatch(i)) {
4739 // A catch clause.
4740 Constant *CatchClause = LI.getClause(i);
4741 Constant *TypeInfo = CatchClause->stripPointerCasts();
4742
4743 // If we already saw this clause, there is no point in having a second
4744 // copy of it.
4745 if (AlreadyCaught.insert(TypeInfo).second) {
4746 // This catch clause was not already seen.
4747 NewClauses.push_back(CatchClause);
4748 } else {
4749 // Repeated catch clause - drop the redundant copy.
4750 MakeNewInstruction = true;
4751 }
4752
4753 // If this is a catch-all then there is no point in keeping any following
4754 // clauses or marking the landingpad as having a cleanup.
4755 if (isCatchAll(Personality, TypeInfo)) {
4756 if (!isLastClause)
4757 MakeNewInstruction = true;
4758 CleanupFlag = false;
4759 break;
4760 }
4761 } else {
4762 // A filter clause. If any of the filter elements were already caught
4763 // then they can be dropped from the filter. It is tempting to try to
4764 // exploit the filter further by saying that any typeinfo that does not
4765 // occur in the filter can't be caught later (and thus can be dropped).
4766 // However this would be wrong, since typeinfos can match without being
4767 // equal (for example if one represents a C++ class, and the other some
4768 // class derived from it).
4769 assert(LI.isFilter(i) && "Unsupported landingpad clause!");
4770 Constant *FilterClause = LI.getClause(i);
4771 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
4772 unsigned NumTypeInfos = FilterType->getNumElements();
4773
4774 // An empty filter catches everything, so there is no point in keeping any
4775 // following clauses or marking the landingpad as having a cleanup. By
4776 // dealing with this case here the following code is made a bit simpler.
4777 if (!NumTypeInfos) {
4778 NewClauses.push_back(FilterClause);
4779 if (!isLastClause)
4780 MakeNewInstruction = true;
4781 CleanupFlag = false;
4782 break;
4783 }
4784
4785 bool MakeNewFilter = false; // If true, make a new filter.
4786 SmallVector<Constant *, 16> NewFilterElts; // New elements.
4787 if (isa<ConstantAggregateZero>(FilterClause)) {
4788 // Not an empty filter - it contains at least one null typeinfo.
4789 assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
4790 Constant *TypeInfo =
Constant::getNullValue(FilterType->getElementType());
4792 // If this typeinfo is a catch-all then the filter can never match.
4793 if (isCatchAll(Personality, TypeInfo)) {
4794 // Throw the filter away.
4795 MakeNewInstruction = true;
4796 continue;
4797 }
4798
4799 // There is no point in having multiple copies of this typeinfo, so
4800 // discard all but the first copy if there is more than one.
4801 NewFilterElts.push_back(TypeInfo);
4802 if (NumTypeInfos > 1)
4803 MakeNewFilter = true;
4804 } else {
4805 ConstantArray *Filter = cast<ConstantArray>(FilterClause);
4806 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
4807 NewFilterElts.reserve(NumTypeInfos);
4808
4809 // Remove any filter elements that were already caught or that already
4810 // occurred in the filter. While there, see if any of the elements are
4811 // catch-alls. If so, the filter can be discarded.
4812 bool SawCatchAll = false;
4813 for (unsigned j = 0; j != NumTypeInfos; ++j) {
4814 Constant *Elt = Filter->getOperand(j);
4815 Constant *TypeInfo = Elt->stripPointerCasts();
4816 if (isCatchAll(Personality, TypeInfo)) {
4817 // This element is a catch-all. Bail out, noting this fact.
4818 SawCatchAll = true;
4819 break;
4820 }
4821
4822 // Even if we've seen a type in a catch clause, we don't want to
4823 // remove it from the filter. An unexpected type handler may be
4824 // set up for a call site which throws an exception of the same
4825 // type caught. In order for the exception thrown by the unexpected
4826 // handler to propagate correctly, the filter must be correctly
4827 // described for the call site.
4828 //
4829 // Example:
4830 //
4831 // void unexpected() { throw 1;}
4832 // void foo() throw (int) {
4833 // std::set_unexpected(unexpected);
4834 // try {
4835 // throw 2.0;
4836 // } catch (int i) {}
4837 // }
4838
4839 // There is no point in having multiple copies of the same typeinfo in
4840 // a filter, so only add it if we didn't already.
4841 if (SeenInFilter.insert(TypeInfo).second)
4842 NewFilterElts.push_back(cast<Constant>(Elt));
4843 }
4844 // A filter containing a catch-all cannot match anything by definition.
4845 if (SawCatchAll) {
4846 // Throw the filter away.
4847 MakeNewInstruction = true;
4848 continue;
4849 }
4850
4851 // If we dropped something from the filter, make a new one.
4852 if (NewFilterElts.size() < NumTypeInfos)
4853 MakeNewFilter = true;
4854 }
4855 if (MakeNewFilter) {
4856 FilterType = ArrayType::get(FilterType->getElementType(),
4857 NewFilterElts.size());
4858 FilterClause = ConstantArray::get(FilterType, NewFilterElts);
4859 MakeNewInstruction = true;
4860 }
4861
4862 NewClauses.push_back(FilterClause);
4863
4864 // If the new filter is empty then it will catch everything so there is
4865 // no point in keeping any following clauses or marking the landingpad
4866 // as having a cleanup. The case of the original filter being empty was
4867 // already handled above.
4868 if (MakeNewFilter && !NewFilterElts.size()) {
4869 assert(MakeNewInstruction && "New filter but not a new instruction!");
4870 CleanupFlag = false;
4871 break;
4872 }
4873 }
4874 }
4875
4876 // If several filters occur in a row then reorder them so that the shortest
4877 // filters come first (those with the smallest number of elements). This is
4878 // advantageous because shorter filters are more likely to match, speeding up
4879 // unwinding, but mostly because it increases the effectiveness of the other
4880 // filter optimizations below.
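// For instance (an illustrative sketch; the clause values are hypothetical), a
// clause list of
//   filter [2 x ptr] [ptr @TyA, ptr @TyB], filter [1 x ptr] [ptr @TyC]
// would be stably reordered so that the single-element filter comes first.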
4881 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
4882 unsigned j;
4883 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
4884 for (j = i; j != e; ++j)
4885 if (!isa<ArrayType>(NewClauses[j]->getType()))
4886 break;
4887
4888 // Check whether the filters are already sorted by length. We need to know
4889 // if sorting them is actually going to do anything so that we only make a
4890 // new landingpad instruction if it does.
4891 for (unsigned k = i; k + 1 < j; ++k)
4892 if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
4893 // Not sorted, so sort the filters now. Doing an unstable sort would be
4894 // correct too but reordering filters pointlessly might confuse users.
4895 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
4896 shorter_filter);
4897 MakeNewInstruction = true;
4898 break;
4899 }
4900
4901 // Look for the next batch of filters.
4902 i = j + 1;
4903 }
4904
4905 // If typeinfos matched if and only if equal, then the elements of a filter L
4906 // that occurs later than a filter F could be replaced by the intersection of
4907 // the elements of F and L. In reality two typeinfos can match without being
4908 // equal (for example if one represents a C++ class, and the other some class
4909 // derived from it) so it would be wrong to perform this transform in general.
4910 // However the transform is correct and useful if F is a subset of L. In that
4911 // case L can be replaced by F, and thus removed altogether since repeating a
4912 // filter is pointless. So here we look at all pairs of filters F and L where
4913 // L follows F in the list of clauses, and remove L if every element of F is
4914 // an element of L. This can occur when inlining C++ functions with exception
4915 // specifications.
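// For instance (an illustrative sketch; the typeinfos are hypothetical), if
//   F = filter [1 x ptr] [ptr @TyA]
// is followed later in the clause list by
//   L = filter [2 x ptr] [ptr @TyA, ptr @TyB]
// then every element of F also occurs in L, so L is redundant and is removed.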
4916 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
4917 // Examine each filter in turn.
4918 Value *Filter = NewClauses[i];
4919 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
4920 if (!FTy)
4921 // Not a filter - skip it.
4922 continue;
4923 unsigned FElts = FTy->getNumElements();
4924 // Examine each filter following this one. Doing this backwards means that
4925 // we don't have to worry about filters disappearing under us when removed.
4926 for (unsigned j = NewClauses.size() - 1; j != i; --j) {
4927 Value *LFilter = NewClauses[j];
4928 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
4929 if (!LTy)
4930 // Not a filter - skip it.
4931 continue;
4932 // If Filter is a subset of LFilter, i.e. every element of Filter is also
4933 // an element of LFilter, then discard LFilter.
4934 SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
4935 // If Filter is empty then it is a subset of LFilter.
4936 if (!FElts) {
4937 // Discard LFilter.
4938 NewClauses.erase(J);
4939 MakeNewInstruction = true;
4940 // Move on to the next filter.
4941 continue;
4942 }
4943 unsigned LElts = LTy->getNumElements();
4944 // If Filter is longer than LFilter then it cannot be a subset of it.
4945 if (FElts > LElts)
4946 // Move on to the next filter.
4947 continue;
4948 // At this point we know that LFilter has at least one element.
4949 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
4950 // Filter is a subset of LFilter iff Filter contains only zeros (as we
4951 // already know that Filter is not longer than LFilter).
4952 if (isa<ConstantAggregateZero>(Filter)) {
4953 assert(FElts <= LElts && "Should have handled this case earlier!");
4954 // Discard LFilter.
4955 NewClauses.erase(J);
4956 MakeNewInstruction = true;
4957 }
4958 // Move on to the next filter.
4959 continue;
4960 }
4961 ConstantArray *LArray = cast<ConstantArray>(LFilter);
4962 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
4963 // Since Filter is non-empty and contains only zeros, it is a subset of
4964 // LFilter iff LFilter contains a zero.
4965 assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
4966 for (unsigned l = 0; l != LElts; ++l)
4967 if (LArray->getOperand(l)->isNullValue()) {
4968 // LFilter contains a zero - discard it.
4969 NewClauses.erase(J);
4970 MakeNewInstruction = true;
4971 break;
4972 }
4973 // Move on to the next filter.
4974 continue;
4975 }
4976 // At this point we know that both filters are ConstantArrays. Loop over
4977 // operands to see whether every element of Filter is also an element of
4978 // LFilter. Since filters tend to be short this is probably faster than
4979 // using a method that scales nicely.
4980 ConstantArray *FArray = cast<ConstantArray>(Filter);
4981 bool AllFound = true;
4982 for (unsigned f = 0; f != FElts; ++f) {
4983 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
4984 AllFound = false;
4985 for (unsigned l = 0; l != LElts; ++l) {
4986 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
4987 if (LTypeInfo == FTypeInfo) {
4988 AllFound = true;
4989 break;
4990 }
4991 }
4992 if (!AllFound)
4993 break;
4994 }
4995 if (AllFound) {
4996 // Discard LFilter.
4997 NewClauses.erase(J);
4998 MakeNewInstruction = true;
4999 }
5000 // Move on to the next filter.
5001 }
5002 }
5003
5004 // If we changed any of the clauses, replace the old landingpad instruction
5005 // with a new one.
5006 if (MakeNewInstruction) {
5007 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
5008 NewClauses.size());
5009 for (Constant *C : NewClauses)
5010 NLI->addClause(C);
5011 // A landing pad with no clauses must have the cleanup flag set. It is
5012 // theoretically possible, though highly unlikely, that we eliminated all
5013 // clauses. If so, force the cleanup flag to true.
5014 if (NewClauses.empty())
5015 CleanupFlag = true;
5016 NLI->setCleanup(CleanupFlag);
5017 return NLI;
5018 }
5019
5020 // Even if none of the clauses changed, we may nonetheless have understood
5021 // that the cleanup flag is pointless. Clear it if so.
5022 if (LI.isCleanup() != CleanupFlag) {
5023 assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
5024 LI.setCleanup(CleanupFlag);
5025 return &LI;
5026 }
5027
5028 return nullptr;
5029}
5030
5031Value *
5032InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
5033 // Try to push freeze through instructions that propagate but don't produce
5034 // poison as far as possible. If an operand of freeze does not produce poison
5035 // then push the freeze through to the operands that are not guaranteed
5036 // non-poison. The actual transform is as follows.
5037 // Op1 = ... ; Op1 can be poison
5038 // Op0 = Inst(Op1, NonPoisonOps...)
5039 // ... = Freeze(Op0)
5040 // =>
5041 // Op1 = ...
5042 // Op1.fr = Freeze(Op1)
5043 // ... = Inst(Op1.fr, NonPoisonOps...)
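// As a concrete sketch (illustrative IR, not taken from a specific test):
//   %op0 = add nsw i32 %op1, 1   ; %op1 may be poison; nsw could add new poison
//   %fr  = freeze i32 %op0
// becomes
//   %op1.fr = freeze i32 %op1
//   %op0    = add i32 %op1.fr, 1 ; poison-generating flags are dropped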
5044
5045 auto CanPushFreeze = [](Value *V) {
5046 if (!isa<Instruction>(V) || isa<PHINode>(V))
5047 return false;
5048
5049 // We can't push the freeze through an instruction which can itself create
5050 // poison. If the only source of new poison is flags, we can simply
5051 // strip them (since we know the only use is the freeze and nothing can
5052 // benefit from them.)
5053 return !canCreateUndefOrPoison(cast<Operator>(V),
5054 /*ConsiderFlagsAndMetadata*/ false);
5055 };
5056
5057 // Pushing freezes up long instruction chains can be expensive. Instead,
5058 // we directly push the freeze all the way to the leaves. However, we leave
5059 // deduplication of freezes on the same value for freezeOtherUses().
5060 Use *OrigUse = &OrigFI.getOperandUse(0);
5061 SmallVector<Use *, 8> Worklist;
5062 SmallPtrSet<Instruction *, 8> Visited;
5063 Worklist.push_back(OrigUse);
5064 while (!Worklist.empty()) {
5065 auto *U = Worklist.pop_back_val();
5066 Value *V = U->get();
5067 if (!CanPushFreeze(V)) {
5068 // If we can't push through the original instruction, abort the transform.
5069 if (U == OrigUse)
5070 return nullptr;
5071
5072 auto *UserI = cast<Instruction>(U->getUser());
5073 Builder.SetInsertPoint(UserI);
5074 Value *Frozen = Builder.CreateFreeze(V, V->getName() + ".fr");
5075 U->set(Frozen);
5076 continue;
5077 }
5078
5079 auto *I = cast<Instruction>(V);
5080 if (!Visited.insert(I).second)
5081 continue;
5082
5083 // reverse() to emit freezes in a more natural order.
5084 for (Use &Op : reverse(I->operands())) {
5085 Value *OpV = Op.get();
5086 if (isGuaranteedNotToBeUndefOrPoison(OpV))
5087 continue;
5088 Worklist.push_back(&Op);
5089 }
5090
5091 I->dropPoisonGeneratingAnnotations();
5092 this->Worklist.add(I);
5093 }
5094
5095 return OrigUse->get();
5096}
5097
5098Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
5099 PHINode *PN) {
5100 // Detect whether this is a recurrence with a start value and some number of
5101 // backedge values. We'll check whether we can push the freeze through the
5102 // backedge values (possibly dropping poison flags along the way) until we
5103 // reach the phi again. In that case, we can move the freeze to the start
5104 // value.
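// As a sketch (illustrative IR, not taken from a specific test):
//   loop:
//     %iv      = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw i32 %iv, 1
//     %frozen  = freeze i32 %iv
// can become
//     %start.fr = freeze i32 %start         ; freeze moved to the start value
//     %iv       = phi i32 [ %start.fr, %entry ], [ %iv.next, %loop ]
//     %iv.next  = add i32 %iv, 1            ; nuw dropped along the backedge
// with uses of %frozen replaced by %iv.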
5105 Use *StartU = nullptr;
5106 SmallVector<Value *> Worklist;
5107 for (Use &U : PN->incoming_values()) {
5108 if (DT.dominates(PN->getParent(), PN->getIncomingBlock(U))) {
5109 // Add backedge value to worklist.
5110 Worklist.push_back(U.get());
5111 continue;
5112 }
5113
5114 // Don't bother handling multiple start values.
5115 if (StartU)
5116 return nullptr;
5117 StartU = &U;
5118 }
5119
5120 if (!StartU || Worklist.empty())
5121 return nullptr; // Not a recurrence.
5122
5123 Value *StartV = StartU->get();
5124 BasicBlock *StartBB = PN->getIncomingBlock(*StartU);
5125 bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV);
5126 // We can't insert freeze if the start value is the result of the
5127 // terminator (e.g. an invoke).
5128 if (StartNeedsFreeze && StartBB->getTerminator() == StartV)
5129 return nullptr;
5130
5131 SmallPtrSet<Value *, 32> Visited;
5132 SmallVector<Instruction *> DropFlags;
5133 while (!Worklist.empty()) {
5134 Value *V = Worklist.pop_back_val();
5135 if (!Visited.insert(V).second)
5136 continue;
5137
5138 if (Visited.size() > 32)
5139 return nullptr; // Limit the total number of values we inspect.
5140
5141 // Assume that PN is non-poison, because it will be after the transform.
5142 if (V == PN || isGuaranteedNotToBeUndefOrPoison(V))
5143 continue;
5144
5145 Instruction *I = dyn_cast<Instruction>(V);
5146 if (!I || canCreateUndefOrPoison(cast<Operator>(I),
5147 /*ConsiderFlagsAndMetadata*/ false))
5148 return nullptr;
5149
5150 DropFlags.push_back(I);
5151 append_range(Worklist, I->operands());
5152 }
5153
5154 for (Instruction *I : DropFlags)
5155 I->dropPoisonGeneratingAnnotations();
5156
5157 if (StartNeedsFreeze) {
5158 Builder.SetInsertPoint(StartBB->getTerminator());
5159 Value *FrozenStartV = Builder.CreateFreeze(StartV,
5160 StartV->getName() + ".fr");
5161 replaceUse(*StartU, FrozenStartV);
5162 }
5163 return replaceInstUsesWith(FI, PN);
5164}
5165
5166bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
5167 Value *Op = FI.getOperand(0);
5168
5169 if (isa<Constant>(Op) || Op->hasOneUse())
5170 return false;
5171
5172 // Move the freeze directly after the definition of its operand, so that
5173 // it dominates the maximum number of uses. Note that it may not dominate
5174 // *all* uses if the operand is an invoke/callbr and the use is in a phi on
5175 // the normal/default destination. This is why the domination check in the
5176 // replacement below is still necessary.
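// As a sketch (illustrative IR, not taken from a specific test): given
//   %op = add i32 %a, %b
//   %u  = mul i32 %op, 2
//   %fr = freeze i32 %op
// the freeze is moved to just after %op and %u is rewritten to use %fr, so all
// dominated uses observe the single frozen value.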
5177 BasicBlock::iterator MoveBefore;
5178 if (isa<Argument>(Op)) {
5179 MoveBefore =
5180 FI.getFunction()->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5181 } else {
5182 auto MoveBeforeOpt = cast<Instruction>(Op)->getInsertionPointAfterDef();
5183 if (!MoveBeforeOpt)
5184 return false;
5185 MoveBefore = *MoveBeforeOpt;
5186 }
5187
5188 // Re-point iterator to come after any debug-info records.
5189 MoveBefore.setHeadBit(false);
5190
5191 bool Changed = false;
5192 if (&FI != &*MoveBefore) {
5193 FI.moveBefore(*MoveBefore->getParent(), MoveBefore);
5194 Changed = true;
5195 }
5196
5197 Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
5198 bool Dominates = DT.dominates(&FI, U);
5199 Changed |= Dominates;
5200 return Dominates;
5201 });
5202
5203 return Changed;
5204}
5205
5206// Check if any direct or bitcast user of this value is a shuffle instruction.
5207static bool isUsedWithinShuffleVector(Value *V) {
5208 for (auto *U : V->users()) {
5209 if (isa<ShuffleVectorInst>(U))
5210 return true;
5211 else if (match(U, m_BitCast(m_Specific(V))) && isUsedWithinShuffleVector(U))
5212 return true;
5213 }
5214 return false;
5215}
5216
5217Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
5218 Value *Op0 = I.getOperand(0);
5219
5220 if (Value *V = simplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
5221 return replaceInstUsesWith(I, V);
5222
5223 // freeze (phi const, x) --> phi const, (freeze x)
5224 if (auto *PN = dyn_cast<PHINode>(Op0)) {
5225 if (Instruction *NV = foldOpIntoPhi(I, PN))
5226 return NV;
5227 if (Instruction *NV = foldFreezeIntoRecurrence(I, PN))
5228 return NV;
5229 }
5230
5231 if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
5232 return replaceInstUsesWith(I, NI);
5233
5234 // If I is freeze(undef), check its uses and fold it to a fixed constant.
5235 // - or: pick -1
5236 // - select's condition: if the true value is constant, choose it by making
5237 // the condition true.
5238 // - default: pick 0
5239 //
5240 // Note that this transform is intentionally done here rather than
5241 // via an analysis in InstSimplify or at individual user sites. That is
5242 // because we must produce the same value for all uses of the freeze -
5243 // it's the reason "freeze" exists!
5244 //
5245 // TODO: This could use getBinopAbsorber() / getBinopIdentity() to avoid
5246 // duplicating logic for binops at least.
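// For instance (an illustrative sketch), if %f = freeze i8 undef is used as
//   %o = or i8 %f, %x       ; in isolation -1 would be preferred
//   %a = add i8 %f, %y      ; no preference, so 0 is preferred
// the per-user preferences disagree, so the common fallback 0 is chosen for
// every use, keeping all uses of the freeze consistent.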
5247 auto getUndefReplacement = [&](Type *Ty) {
5248 Value *BestValue = nullptr;
5249 Value *NullValue = Constant::getNullValue(Ty);
5250 for (const auto *U : I.users()) {
5251 Value *V = NullValue;
5252 if (match(U, m_Or(m_Value(), m_Value())))
5253 V = Constant::getAllOnesValue(Ty);
5254 else if (match(U, m_Select(m_Specific(&I), m_Constant(), m_Value())))
5255 V = ConstantInt::getTrue(Ty);
5256 else if (match(U, m_c_Select(m_Specific(&I), m_Value(V)))) {
5257 if (!isGuaranteedNotToBeUndefOrPoison(V, &AC, &I, &DT))
5258 V = NullValue;
5259 }
5260
5261 if (!BestValue)
5262 BestValue = V;
5263 else if (BestValue != V)
5264 BestValue = NullValue;
5265 }
5266 assert(BestValue && "Must have at least one use");
5267 return BestValue;
5268 };
5269
5270 if (match(Op0, m_Undef())) {
5271 // Don't fold freeze(undef/poison) if it's used as a vector operand in
5272 // a shuffle. This may improve codegen for shuffles that allow
5273 // unspecified inputs.
5274 if (isUsedWithinShuffleVector(&I))
5275 return nullptr;
5276 return replaceInstUsesWith(I, getUndefReplacement(I.getType()));
5277 }
5278
5279 auto getFreezeVectorReplacement = [](Constant *C) -> Constant * {
5280 Type *Ty = C->getType();
5281 auto *VTy = dyn_cast<FixedVectorType>(Ty);
5282 if (!VTy)
5283 return nullptr;
5284 unsigned NumElts = VTy->getNumElements();
5285 Constant *BestValue = Constant::getNullValue(VTy->getScalarType());
5286 for (unsigned i = 0; i != NumElts; ++i) {
5287 Constant *EltC = C->getAggregateElement(i);
5288 if (EltC && !match(EltC, m_Undef())) {
5289 BestValue = EltC;
5290 break;
5291 }
5292 }
5293 return Constant::replaceUndefsWith(C, BestValue);
5294 };
5295
5296 Constant *C;
5297 if (match(Op0, m_Constant(C)) && C->containsUndefOrPoisonElement() &&
5298 !C->containsConstantExpression()) {
5299 if (Constant *Repl = getFreezeVectorReplacement(C))
5300 return replaceInstUsesWith(I, Repl);
5301 }
5302
5303 // Replace uses of Op with freeze(Op).
5304 if (freezeOtherUses(I))
5305 return &I;
5306
5307 return nullptr;
5308}
5309
5310/// Check for case where the call writes to an otherwise dead alloca. This
5311/// shows up for unused out-params in idiomatic C/C++ code. Note that this
5312/// helper *only* analyzes the write; doesn't check any other legality aspect.
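/// A typical source pattern this is aimed at (an illustrative sketch):
///   int Used, Unused;
///   computeValues(&Used, &Unused); // writes through both out-params
/// If the alloca behind 'Unused' has no other users, the call's store into it
/// is not observable and need not block moving the call.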
5313static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
5314 auto *CB = dyn_cast<CallBase>(I);
5315 if (!CB)
5316 // TODO: handle e.g. store to alloca here - only worth doing if we extend
5317 // to allow reload along used path as described below. Otherwise, this
5318 // is simply a store to a dead allocation which will be removed.
5319 return false;
5320 std::optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
5321 if (!Dest)
5322 return false;
5323 auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
5324 if (!AI)
5325 // TODO: allow malloc?
5326 return false;
5327 // TODO: allow memory access dominated by move point? Note that since AI
5328 // could have a reference to itself captured by the call, we would need to
5329 // account for cycles in doing so.
5330 SmallVector<const User *> AllocaUsers;
5331 SmallPtrSet<const User *, 4> Visited;
5332 auto pushUsers = [&](const Instruction &I) {
5333 for (const User *U : I.users()) {
5334 if (Visited.insert(U).second)
5335 AllocaUsers.push_back(U);
5336 }
5337 };
5338 pushUsers(*AI);
5339 while (!AllocaUsers.empty()) {
5340 auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
5341 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
5342 pushUsers(*UserI);
5343 continue;
5344 }
5345 if (UserI == CB)
5346 continue;
5347 // TODO: support lifetime.start/end here
5348 return false;
5349 }
5350 return true;
5351}
5352
5353/// Try to move the specified instruction from its current block into the
5354/// beginning of DestBlock, which can only happen if it's safe to move the
5355/// instruction past all of the instructions between it and the end of its
5356/// block.
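/// As a sketch (illustrative IR, not taken from a specific test):
///   bb:
///     %v = load i32, ptr %p          ; only used in %then
///     br i1 %c, label %then, label %else
/// The load can be sunk to the start of %then when no instruction after it in
/// %bb may write memory, so the %else path no longer executes the load.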
5357bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
5358 BasicBlock *DestBlock) {
5359 BasicBlock *SrcBlock = I->getParent();
5360
5361 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
5362 if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
5363 I->isTerminator())
5364 return false;
5365
5366 // Do not sink static or dynamic alloca instructions. Static allocas must
5367 // remain in the entry block, and dynamic allocas must not be sunk in between
5368 // a stacksave / stackrestore pair, which would incorrectly shorten its
5369 // lifetime.
5370 if (isa<AllocaInst>(I))
5371 return false;
5372
5373 // Do not sink into catchswitch blocks.
5374 if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
5375 return false;
5376
5377 // Do not sink convergent call instructions.
5378 if (auto *CI = dyn_cast<CallInst>(I)) {
5379 if (CI->isConvergent())
5380 return false;
5381 }
5382
5383 // Unless we can prove that the memory write isn't visible except on the
5384 // path we're sinking to, we must bail.
5385 if (I->mayWriteToMemory()) {
5386 if (!SoleWriteToDeadLocal(I, TLI))
5387 return false;
5388 }
5389
5390 // We can only sink load instructions if there is nothing between the load and
5391 // the end of block that could change the value.
5392 if (I->mayReadFromMemory() &&
5393 !I->hasMetadata(LLVMContext::MD_invariant_load)) {
5394 // We don't want to do any sophisticated alias analysis, so we only check
5395 // the instructions after I in I's parent block if we try to sink to its
5396 // successor block.
5397 if (DestBlock->getUniquePredecessor() != I->getParent())
5398 return false;
5399 for (BasicBlock::iterator Scan = std::next(I->getIterator()),
5400 E = I->getParent()->end();
5401 Scan != E; ++Scan)
5402 if (Scan->mayWriteToMemory())
5403 return false;
5404 }
5405
5406 I->dropDroppableUses([&](const Use *U) {
5407 auto *I = dyn_cast<Instruction>(U->getUser());
5408 if (I && I->getParent() != DestBlock) {
5409 Worklist.add(I);
5410 return true;
5411 }
5412 return false;
5413 });
5414 /// FIXME: We could remove droppable uses that are not dominated by
5415 /// the new position.
5416
5417 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
5418 I->moveBefore(*DestBlock, InsertPos);
5419 ++NumSunkInst;
5420
5421 // Also sink all related debug uses from the source basic block. Otherwise we
5422 // get debug use before the def. Attempt to salvage debug uses first, to
5423 // maximise the range variables have location for. If we cannot salvage, then
5424 // mark the location undef: we know it was supposed to receive a new location
5425 // here, but that computation has been sunk.
5426 SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
5427 findDbgUsers(I, DbgVariableRecords);
5428 if (!DbgVariableRecords.empty())
5429 tryToSinkInstructionDbgVariableRecords(I, InsertPos, SrcBlock, DestBlock,
5430 DbgVariableRecords);
5431
5432 // PS: there are numerous flaws with this behaviour, not least that right now
5433 // assignments can be re-ordered past other assignments to the same variable
5434 // if they use different Values. Creating more undef assignments can never be
5435 // undone. And salvaging all users outside of this block can unnecessarily
5436 // alter the lifetime of the live-value that the variable refers to.
5437 // Some of these things can be resolved by tolerating debug use-before-defs in
5438 // LLVM-IR, however it depends on the instruction-referencing CodeGen backend
5439 // being used for more architectures.
5440
5441 return true;
5442}
5443
5444void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
5445 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
5446 BasicBlock *DestBlock,
5447 SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {
5448 // For all debug values in the destination block, the sunk instruction
5449 // will still be available, so they do not need to be dropped.
5450
5451 // Fetch all DbgVariableRecords not already in the destination.
5452 SmallVector<DbgVariableRecord *, 2> DbgVariableRecordsToSalvage;
5453 for (auto &DVR : DbgVariableRecords)
5454 if (DVR->getParent() != DestBlock)
5455 DbgVariableRecordsToSalvage.push_back(DVR);
5456
5457 // Fetch a second collection, of DbgVariableRecords in the source block that
5458 // we're going to sink.
5459 SmallVector<DbgVariableRecord *> DbgVariableRecordsToSink;
5460 for (DbgVariableRecord *DVR : DbgVariableRecordsToSalvage)
5461 if (DVR->getParent() == SrcBlock)
5462 DbgVariableRecordsToSink.push_back(DVR);
5463
5464 // Sort DbgVariableRecords according to their position in the block. This is a
5465 // partial order: DbgVariableRecords attached to different instructions will
5466 // be ordered by the instruction order, but DbgVariableRecords attached to the
5467 // same instruction won't have an order.
5468 auto Order = [](DbgVariableRecord *A, DbgVariableRecord *B) -> bool {
5469 return B->getInstruction()->comesBefore(A->getInstruction());
5470 };
5471 llvm::stable_sort(DbgVariableRecordsToSink, Order);
5472
5473 // If there are two assignments to the same variable attached to the same
5474 // instruction, the ordering between the two assignments is important. Scan
5475 // for this (rare) case and establish which is the last assignment.
5476 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5477 SmallDenseMap<InstVarPair, DbgVariableRecord *> FilterOutMap;
5478 if (DbgVariableRecordsToSink.size() > 1) {
5479 SmallDenseMap<InstVarPair, unsigned> CountMap;
5480 // Count how many assignments to each variable there is per instruction.
5481 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5482 DebugVariable DbgUserVariable =
5483 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5484 DVR->getDebugLoc()->getInlinedAt());
5485 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5486 }
5487
5488 // If there are any instructions with two assignments, add them to the
5489 // FilterOutMap to record that they need extra filtering.
5490 SmallPtrSet<const Instruction *, 4> DupSet;
5491 for (auto It : CountMap) {
5492 if (It.second > 1) {
5493 FilterOutMap[It.first] = nullptr;
5494 DupSet.insert(It.first.first);
5495 }
5496 }
5497
5498 // For all instruction/variable pairs needing extra filtering, find the
5499 // latest assignment.
5500 for (const Instruction *Inst : DupSet) {
5501 for (DbgVariableRecord &DVR :
5502 llvm::reverse(filterDbgVars(Inst->getDbgRecordRange()))) {
5503 DebugVariable DbgUserVariable =
5504 DebugVariable(DVR.getVariable(), DVR.getExpression(),
5505 DVR.getDebugLoc()->getInlinedAt());
5506 auto FilterIt =
5507 FilterOutMap.find(std::make_pair(Inst, DbgUserVariable));
5508 if (FilterIt == FilterOutMap.end())
5509 continue;
5510 if (FilterIt->second != nullptr)
5511 continue;
5512 FilterIt->second = &DVR;
5513 }
5514 }
5515 }
5516
5517 // Perform cloning of the DbgVariableRecords that we plan on sinking, filter
5518 // out any duplicate assignments identified above.
5519 SmallVector<DbgVariableRecord *, 2> DVRClones;
5520 SmallSet<DebugVariable, 4> SunkVariables;
5521 for (DbgVariableRecord *DVR : DbgVariableRecordsToSink) {
5522 if (DVR->Type == DbgVariableRecord::LocationType::Declare)
5523 continue;
5524
5525 DebugVariable DbgUserVariable =
5526 DebugVariable(DVR->getVariable(), DVR->getExpression(),
5527 DVR->getDebugLoc()->getInlinedAt());
5528
5529 // For any variable where there were multiple assignments in the same place,
5530 // ignore all but the last assignment.
5531 if (!FilterOutMap.empty()) {
5532 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5533 auto It = FilterOutMap.find(IVP);
5534
5535 // Filter out.
5536 if (It != FilterOutMap.end() && It->second != DVR)
5537 continue;
5538 }
5539
5540 if (!SunkVariables.insert(DbgUserVariable).second)
5541 continue;
5542
5543 if (DVR->isDbgAssign())
5544 continue;
5545
5546 DVRClones.emplace_back(DVR->clone());
5547 LLVM_DEBUG(dbgs() << "CLONE: " << *DVRClones.back() << '\n');
5548 }
5549
5550 // Perform salvaging without the clones, then sink the clones.
5551 if (DVRClones.empty())
5552 return;
5553
5554 salvageDebugInfoForDbgValues(*I, DbgVariableRecordsToSalvage);
5555
5556 // The clones are in reverse order of original appearance. Assert that the
5557 // head bit is set on the iterator as we _should_ have received it via
5558 // getFirstInsertionPt. Inserting like this will reverse the clone order as
5559 // we'll repeatedly insert at the head, such as:
5560 // DVR-3 (third insertion goes here)
5561 // DVR-2 (second insertion goes here)
5562 // DVR-1 (first insertion goes here)
5563 // Any-Prior-DVRs
5564 // InsertPtInst
5565 assert(InsertPos.getHeadBit());
5566 for (DbgVariableRecord *DVRClone : DVRClones) {
5567 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5568 LLVM_DEBUG(dbgs() << "SINK: " << *DVRClone << '\n');
5569 }
5570}
5571
5572bool InstCombinerImpl::run() {
5573 while (!Worklist.isEmpty()) {
5574 // Walk deferred instructions in reverse order, and push them to the
5575 // worklist, which means they'll end up popped from the worklist in-order.
5576 while (Instruction *I = Worklist.popDeferred()) {
5577 // Check to see if we can DCE the instruction. We do this already here to
5578 // reduce the number of uses and thus allow other folds to trigger.
5579 // Note that eraseInstFromFunction() may push additional instructions on
5580 // the deferred worklist, so this will DCE whole instruction chains.
5581 if (isInstructionTriviallyDead(I, &TLI)) {
5582 eraseInstFromFunction(*I);
5583 ++NumDeadInst;
5584 continue;
5585 }
5586
5587 Worklist.push(I);
5588 }
5589
5590 Instruction *I = Worklist.removeOne();
5591 if (I == nullptr) continue; // skip null values.
5592
5593 // Check to see if we can DCE the instruction.
5594 if (isInstructionTriviallyDead(I, &TLI)) {
5595 eraseInstFromFunction(*I);
5596 ++NumDeadInst;
5597 continue;
5598 }
5599
5600 if (!DebugCounter::shouldExecute(VisitCounter))
5601 continue;
5602
5603 // See if we can trivially sink this instruction to its user if we can
5604 // prove that the successor is not executed more frequently than our block.
5605 // Return the UserBlock if successful.
5606 auto getOptionalSinkBlockForInst =
5607 [this](Instruction *I) -> std::optional<BasicBlock *> {
5608 if (!EnableCodeSinking)
5609 return std::nullopt;
5610
5611 BasicBlock *BB = I->getParent();
5612 BasicBlock *UserParent = nullptr;
5613 unsigned NumUsers = 0;
5614
5615 for (Use &U : I->uses()) {
5616 User *User = U.getUser();
5617 if (User->isDroppable())
5618 continue;
5619 if (NumUsers > MaxSinkNumUsers)
5620 return std::nullopt;
5621
5622 Instruction *UserInst = cast<Instruction>(User);
5623 // Special handling for Phi nodes - get the block the use occurs in.
5624 BasicBlock *UserBB = UserInst->getParent();
5625 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
5626 UserBB = PN->getIncomingBlock(U);
5627 // Bail out if we have uses in different blocks. We don't do any
5628 // sophisticated analysis (i.e finding NearestCommonDominator of these
5629 // use blocks).
5630 if (UserParent && UserParent != UserBB)
5631 return std::nullopt;
5632 UserParent = UserBB;
5633
5634 // Make sure these checks are done only once, naturally we do the checks
5635 // the first time we get the userparent, this will save compile time.
5636 if (NumUsers == 0) {
5637 // Try sinking to another block. If that block is unreachable, then do
5638 // not bother. SimplifyCFG should handle it.
5639 if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
5640 return std::nullopt;
5641
5642 auto *Term = UserParent->getTerminator();
5643 // See if the user is one of our successors that has only one
5644 // predecessor, so that we don't have to split the critical edge.
5645 // Another option where we can sink is a block that ends with a
5646 // terminator that does not pass control to other block (such as
5647 // return or unreachable or resume). In this case:
5648 // - I dominates the User (by SSA form);
5649 // - the User will be executed at most once.
5650 // So sinking I down to User is always profitable or neutral.
5651 if (UserParent->getUniquePredecessor() != BB && !succ_empty(Term))
5652 return std::nullopt;
5653
5654 assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
5655 }
5656
5657 NumUsers++;
5658 }
5659
5660 // No user or only has droppable users.
5661 if (!UserParent)
5662 return std::nullopt;
5663
5664 return UserParent;
5665 };
5666
5667 auto OptBB = getOptionalSinkBlockForInst(I);
5668 if (OptBB) {
5669 auto *UserParent = *OptBB;
5670 // Okay, the CFG is simple enough, try to sink this instruction.
5671 if (tryToSinkInstruction(I, UserParent)) {
5672 LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
5673 MadeIRChange = true;
5674 // We'll add uses of the sunk instruction below, but since
5675 // sinking can expose opportunities for its *operands*, add
5676 // them to the worklist.
5677 for (Use &U : I->operands())
5678 if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
5679 Worklist.push(OpI);
5680 }
5681 }
5682
5683 // Now that we have an instruction, try combining it to simplify it.
5684 Builder.SetInsertPoint(I);
5685 Builder.CollectMetadataToCopy(
5686 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5687
5688#ifndef NDEBUG
5689 std::string OrigI;
5690#endif
5691 LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS););
5692 LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
5693
5694 if (Instruction *Result = visit(*I)) {
5695 ++NumCombined;
5696 // Should we replace the old instruction with a new one?
5697 if (Result != I) {
5698 LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
5699 << " New = " << *Result << '\n');
5700
5701 // We copy the old instruction's DebugLoc to the new instruction, unless
5702 // InstCombine already assigned a DebugLoc to it, in which case we
5703 // should trust the more specifically selected DebugLoc.
5704 Result->setDebugLoc(Result->getDebugLoc().orElse(I->getDebugLoc()));
5705 // We also copy annotation metadata to the new instruction.
5706 Result->copyMetadata(*I, LLVMContext::MD_annotation);
5707 // Everything uses the new instruction now.
5708 I->replaceAllUsesWith(Result);
5709
5710 // Move the name to the new instruction first.
5711 Result->takeName(I);
5712
5713 // Insert the new instruction into the basic block...
5714 BasicBlock *InstParent = I->getParent();
5715 BasicBlock::iterator InsertPos = I->getIterator();
5716
5717 // Are we replacing a PHI with something that isn't a PHI, or vice versa?
5718 if (isa<PHINode>(Result) != isa<PHINode>(I)) {
5719 // We need to fix up the insertion point.
5720 if (isa<PHINode>(I)) // PHI -> Non-PHI
5721 InsertPos = InstParent->getFirstInsertionPt();
5722 else // Non-PHI -> PHI
5723 InsertPos = InstParent->getFirstNonPHIIt();
5724 }
5725
5726 Result->insertInto(InstParent, InsertPos);
5727
5728 // Push the new instruction and any users onto the worklist.
5729 Worklist.pushUsersToWorkList(*Result);
5730 Worklist.push(Result);
5731
5732 eraseInstFromFunction(*I);
5733 } else {
5734 LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
5735 << " New = " << *I << '\n');
5736
5737 // If the instruction was modified, it's possible that it is now dead.
5738 // if so, remove it.
5739 if (isInstructionTriviallyDead(I, &TLI)) {
5740 eraseInstFromFunction(*I);
5741 } else {
5742 Worklist.pushUsersToWorkList(*I);
5743 Worklist.push(I);
5744 }
5745 }
5746 MadeIRChange = true;
5747 }
5748 }
5749
5750 Worklist.zap();
5751 return MadeIRChange;
5752}
5753
5754// Track the scopes used by !alias.scope and !noalias. In a function, a
5755// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
5756// by both sets. If not, the declaration of the scope can be safely omitted.
5757// The MDNode of the scope can be omitted as well for the instructions that are
5758// part of this function. We do not do that at this point, as this might become
5759// too time consuming to do.
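// For instance (an illustrative sketch), a declaration such as
//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
// only matters if scope !2 is referenced both by some !alias.scope and by some
// !noalias metadata in this function; otherwise the declaration can be erased.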
5760class AliasScopeTracker {
5761 SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
5762 SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
5763
5764public:
5765 void analyse(Instruction *I) {
5766 // This seems to be faster than checking 'mayReadOrWriteMemory()'.
5767 if (!I->hasMetadataOtherThanDebugLoc())
5768 return;
5769
5770 auto Track = [](Metadata *ScopeList, auto &Container) {
5771 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5772 if (!MDScopeList || !Container.insert(MDScopeList).second)
5773 return;
5774 for (const auto &MDOperand : MDScopeList->operands())
5775 if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
5776 Container.insert(MDScope);
5777 };
5778
5779 Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5780 Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5781 }
5782
5783 bool isNoAliasScopeDeclDead(Instruction *Inst) {
5784 const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
5785 if (!Decl)
5786 return false;
5787
5788 assert(Decl->use_empty() &&
5789 "llvm.experimental.noalias.scope.decl in use ?");
5790 const MDNode *MDSL = Decl->getScopeList();
5791 assert(MDSL->getNumOperands() == 1 &&
5792 "llvm.experimental.noalias.scope should refer to a single scope");
5793 auto &MDOperand = MDSL->getOperand(0);
5794 if (auto *MD = dyn_cast<MDNode>(MDOperand))
5795 return !UsedAliasScopesAndLists.contains(MD) ||
5796 !UsedNoAliasScopesAndLists.contains(MD);
5797
5798 // Not an MDNode ? throw away.
5799 return true;
5800 }
5801};
5802
5803/// Populate the IC worklist from a function, by walking it in reverse
5804/// post-order and adding all reachable code to the worklist.
5805///
5806/// This has a couple of tricks to make the code faster and more powerful. In
5807/// particular, we constant fold and DCE instructions as we go, to avoid adding
5808/// them to the worklist (this significantly speeds up instcombine on code where
5809/// many instructions are dead or constant). Additionally, if we find a branch
5810/// whose condition is a known constant, we only visit the reachable successors.
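/// For instance (an illustrative sketch), with a terminator like
///   br i1 true, label %live, label %dead
/// only %live is visited; the edge to %dead is recorded as dead, and the values
/// %dead's phis receive along it are replaced by poison.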
5811bool InstCombinerImpl::prepareWorklist(Function &F) {
5812 bool MadeIRChange = false;
5813 SmallPtrSet<BasicBlock *, 32> LiveBlocks;
5814 SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
5815 DenseMap<Constant *, Constant *> FoldedConstants;
5816 AliasScopeTracker SeenAliasScopes;
5817
5818 auto HandleOnlyLiveSuccessor = [&](BasicBlock *BB, BasicBlock *LiveSucc) {
5819 for (BasicBlock *Succ : successors(BB))
5820 if (Succ != LiveSucc && DeadEdges.insert({BB, Succ}).second)
5821 for (PHINode &PN : Succ->phis())
5822 for (Use &U : PN.incoming_values())
5823 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5824 U.set(PoisonValue::get(PN.getType()));
5825 MadeIRChange = true;
5826 }
5827 };
5828
5829 for (BasicBlock *BB : RPOT) {
5830 if (!BB->isEntryBlock() && all_of(predecessors(BB), [&](BasicBlock *Pred) {
5831 return DeadEdges.contains({Pred, BB}) || DT.dominates(BB, Pred);
5832 })) {
5833 HandleOnlyLiveSuccessor(BB, nullptr);
5834 continue;
5835 }
5836 LiveBlocks.insert(BB);
5837
5838 for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
5839 // ConstantProp instruction if trivially constant.
5840 if (!Inst.use_empty() &&
5841 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5842 if (Constant *C = ConstantFoldInstruction(&Inst, DL, &TLI)) {
5843 LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
5844 << '\n');
5845 Inst.replaceAllUsesWith(C);
5846 ++NumConstProp;
5847 if (isInstructionTriviallyDead(&Inst, &TLI))
5848 Inst.eraseFromParent();
5849 MadeIRChange = true;
5850 continue;
5851 }
5852
5853 // See if we can constant fold its operands.
5854 for (Use &U : Inst.operands()) {
5855 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5856 continue;
5857
5858 auto *C = cast<Constant>(U);
5859 Constant *&FoldRes = FoldedConstants[C];
5860 if (!FoldRes)
5861 FoldRes = ConstantFoldConstant(C, DL, &TLI);
5862
5863 if (FoldRes != C) {
5864 LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
5865 << "\n Old = " << *C
5866 << "\n New = " << *FoldRes << '\n');
5867 U = FoldRes;
5868 MadeIRChange = true;
5869 }
5870 }
5871
5872 // Skip processing debug and pseudo intrinsics in InstCombine. Processing
5873 // these call instructions consumes non-trivial amount of time and
5874 // provides no value for the optimization.
5875 if (!Inst.isDebugOrPseudoInst()) {
5876 InstrsForInstructionWorklist.push_back(&Inst);
5877 SeenAliasScopes.analyse(&Inst);
5878 }
5879 }
5880
5881 // If this is a branch or switch on a constant, mark only the single
5882 // live successor. Otherwise assume all successors are live.
5883 Instruction *TI = BB->getTerminator();
5884 if (BranchInst *BI = dyn_cast<BranchInst>(TI); BI && BI->isConditional()) {
5885 if (isa<UndefValue>(BI->getCondition())) {
5886 // Branch on undef is UB.
5887 HandleOnlyLiveSuccessor(BB, nullptr);
5888 continue;
5889 }
5890 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5891 bool CondVal = Cond->getZExtValue();
5892 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5893 continue;
5894 }
5895 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5896 if (isa<UndefValue>(SI->getCondition())) {
5897 // Switch on undef is UB.
5898 HandleOnlyLiveSuccessor(BB, nullptr);
5899 continue;
5900 }
5901 if (auto *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5902 HandleOnlyLiveSuccessor(BB,
5903 SI->findCaseValue(Cond)->getCaseSuccessor());
5904 continue;
5905 }
5906 }
5907 }
5908
5909 // Remove instructions inside unreachable blocks. This prevents the
5910 // instcombine code from having to deal with some bad special cases, and
5911 // reduces use counts of instructions.
5912 for (BasicBlock &BB : F) {
5913 if (LiveBlocks.count(&BB))
5914 continue;
5915
5916 unsigned NumDeadInstInBB;
5917 NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
5918
5919 MadeIRChange |= NumDeadInstInBB != 0;
5920 NumDeadInst += NumDeadInstInBB;
5921 }
5922
5923 // Once we've found all of the instructions to add to instcombine's worklist,
5924 // add them in reverse order. This way instcombine will visit from the top
5925 // of the function down. This jives well with the way that it adds all uses
5926 // of instructions to the worklist after doing a transformation, thus avoiding
5927 // some N^2 behavior in pathological cases.
5928 Worklist.reserve(InstrsForInstructionWorklist.size());
5929 for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
5930 // DCE instruction if trivially dead. As we iterate in reverse program
5931 // order here, we will clean up whole chains of dead instructions.
5932 if (isInstructionTriviallyDead(Inst, &TLI) ||
5933 SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
5934 ++NumDeadInst;
5935 LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
5936 salvageDebugInfo(*Inst);
5937 Inst->eraseFromParent();
5938 MadeIRChange = true;
5939 continue;
5940 }
5941
5942 Worklist.push(Inst);
5943 }
5944
5945 return MadeIRChange;
5946}
5947
5948void InstCombinerImpl::computeBackEdges() {
5949 // Collect backedges.
5950 SmallPtrSet<BasicBlock *, 16> Visited;
5951 for (BasicBlock *BB : RPOT) {
5952 Visited.insert(BB);
5953 for (BasicBlock *Succ : successors(BB))
5954 if (Visited.contains(Succ))
5955 BackEdges.insert({BB, Succ});
5956 }
5957 ComputedBackEdges = true;
5958}
5959
5960static bool combineInstructionsOverFunction(
5961 Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
5962 AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
5963 DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
5964 BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
5965 const InstCombineOptions &Opts) {
5966 auto &DL = F.getDataLayout();
5967 bool VerifyFixpoint = Opts.VerifyFixpoint &&
5968 !F.hasFnAttribute("instcombine-no-verify-fixpoint");
5969
5970 /// Builder - This is an IRBuilder that automatically inserts new
5971 /// instructions into the worklist when they are created.
5972 InstCombiner::BuilderTy Builder(
5973 F.getContext(), TargetFolder(DL),
5974 IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
5975 Worklist.add(I);
5976 if (auto *Assume = dyn_cast<AssumeInst>(I))
5977 AC.registerAssumption(Assume);
5978 }));
5979
5979
5980 ReversePostOrderTraversal<BasicBlock *> RPOT(&F.front());
5981
5982 // Lower dbg.declare intrinsics otherwise their value may be clobbered
5983 // by instcombiner.
5984 bool MadeIRChange = false;
5985 if (ShouldLowerDbgDeclare)
5986 MadeIRChange = LowerDbgDeclare(F);
5987
5988 // Iterate while there is work to do.
5989 unsigned Iteration = 0;
5990 while (true) {
5991 if (Iteration >= Opts.MaxIterations && !VerifyFixpoint) {
5992 LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << Opts.MaxIterations
5993 << " on " << F.getName()
5994 << " reached; stopping without verifying fixpoint\n");
5995 break;
5996 }
5997
5998 ++Iteration;
5999 ++NumWorklistIterations;
6000 LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
6001 << F.getName() << "\n");
6002
6003 InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
6004 ORE, BFI, BPI, PSI, DL, RPOT);
6005 IC.MaxArraySizeForCombine = MaxArraySize;
6006 bool MadeChangeInThisIteration = IC.prepareWorklist(F);
6007 MadeChangeInThisIteration |= IC.run();
6008 if (!MadeChangeInThisIteration)
6009 break;
6010
6011 MadeIRChange = true;
6012 if (Iteration > Opts.MaxIterations) {
6014 "Instruction Combining on " + Twine(F.getName()) +
6015 " did not reach a fixpoint after " + Twine(Opts.MaxIterations) +
6016 " iterations. " +
6017 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6018 "'instcombine-no-verify-fixpoint' to suppress this error.");
6019 }
6020 }
6021
6022 if (Iteration == 1)
6023 ++NumOneIteration;
6024 else if (Iteration == 2)
6025 ++NumTwoIterations;
6026 else if (Iteration == 3)
6027 ++NumThreeIterations;
6028 else
6029 ++NumFourOrMoreIterations;
6030
6031 return MadeIRChange;
6032}
6033
6034InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}
6035
6036void InstCombinePass::printPipeline(
6037 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
6038 static_cast<PassInfoMixin<InstCombinePass> *>(this)->printPipeline(
6039 OS, MapClassName2PassName);
6040 OS << '<';
6041 OS << "max-iterations=" << Options.MaxIterations << ";";
6042 OS << (Options.VerifyFixpoint ? "" : "no-") << "verify-fixpoint";
6043 OS << '>';
6044}
6045
6046char InstCombinePass::ID = 0;
6047
6050 auto &LRT = AM.getResult<LastRunTrackingAnalysis>(F);
6051 // No changes since last InstCombine pass, exit early.
6052 if (LRT.shouldSkip(&ID))
6053 return PreservedAnalyses::all();
6054
6055 auto &AC = AM.getResult<AssumptionAnalysis>(F);
6056 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
6057 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
6058 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
6059 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
6060
6061 auto *AA = &AM.getResult<AAManager>(F);
6062 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
6063 ProfileSummaryInfo *PSI =
6064 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
6065 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6066 &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
6067 auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
6068
6069 if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6070 BFI, BPI, PSI, Options)) {
6071 // No changes, all analyses are preserved.
6072 LRT.update(&ID, /*Changed=*/false);
6073 return PreservedAnalyses::all();
6074 }
6075
6076 // Mark all the analyses that instcombine updates as preserved.
6077 PreservedAnalyses PA;
6078 LRT.update(&ID, /*Changed=*/true);
6079 PA.preserveSet<CFGAnalyses>();
6080 PA.preserve<LastRunTrackingAnalysis>();
6081 return PA;
6082}
6083
6099
6100bool InstructionCombiningPass::runOnFunction(Function &F) {
6101 if (skipFunction(F))
6102 return false;
6103
6104 // Required analyses.
6105 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
6106 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
6107 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
6108 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
6109 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
6110 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
6111
6112 // Optional analyses.
6113 ProfileSummaryInfo *PSI =
6114 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
6115 BlockFrequencyInfo *BFI =
6116 (PSI && PSI->hasProfileSummary()) ?
6117 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
6118 nullptr;
6119 BranchProbabilityInfo *BPI = nullptr;
6120 if (auto *WrapperPass =
6121 getAnalysisIfAvailable<LazyBranchProbabilityInfoPass>())
6122 BPI = &WrapperPass->getBPI();
6123
6124 return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
6125 BFI, BPI, PSI, InstCombineOptions());
6126}
6127
6129
6133
6135 "Combine redundant instructions", false, false)
6146 "Combine redundant instructions", false, false)
6147
6148// Initialization Routines
6152
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
DXIL Resource Access
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
Definition IVUsers.cpp:48
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file contains the declarations for metadata subclasses.
#define T
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
unsigned OpIndex
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:167
#define LLVM_DEBUG(...)
Definition Debug.h:119
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition VPlanSLP.cpp:247
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
Definition APInt.cpp:1758
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition APInt.h:423
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Definition APInt.cpp:1890
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition APInt.h:371
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1928
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition APInt.h:827
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1960
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:306
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
Definition APInt.cpp:1941
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition APInt.h:851
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition ArrayRef.h:224
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:528
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
Definition BasicBlock.h:482
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
size_t size() const
Definition BasicBlock.h:480
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
Definition InstrTypes.h:374
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Definition InstrTypes.h:294
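A sketch of constructing instructions through these factories; A, B, and InsertPt are assumed to be two i32 Values and an insertion-point Instruction*.

BinaryOperator *Sum =
    BinaryOperator::Create(Instruction::Add, A, B, "sum", InsertPt);
BinaryOperator *Neg = BinaryOperator::CreateNeg(Sum, "neg", InsertPt);  // 0 - Sum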
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ ICMP_NE
not equal
Definition InstrTypes.h:700
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:829
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:791
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition Constants.h:776
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
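A sketch of the exact-region helpers; the predicate, width, and constants are illustrative.

// All X with "icmp ult X, 8", i.e. the unsigned range [0, 8).
ConstantRange R =
    ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 8));
bool In = R.contains(APInt(32, 3));   // true
bool Out = R.contains(APInt(32, 9));  // false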
Constant Vector Declarations.
Definition Constants.h:517
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
Definition Constant.h:219
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:187
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:165
bool empty() const
Definition DenseMap.h:107
iterator end()
Definition DenseMap.h:81
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
Analysis pass which computes a DominatorTree.
Definition Dominators.h:284
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:322
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
iterator_range< idx_iterator > indices() const
idx_iterator idx_end() const
static ExtractValueInst * Create(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
idx_iterator idx_begin() const
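A sketch of extracting both fields of a two-element aggregate (for example the {value, overflow-bit} result of an overflow intrinsic); Agg and InsertPt are assumed to be in scope.

ExtractValueInst *Val = ExtractValueInst::Create(Agg, {0u}, "val", InsertPt);
ExtractValueInst *Ovf = ExtractValueInst::Create(Agg, {1u}, "ovf", InsertPt);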
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
This class represents a freeze function that returns a random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
FunctionPass(char &pid)
Definition Pass.h:316
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition Pass.cpp:188
const BasicBlock & getEntryBlock() const
Definition Function.h:807
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep p, y), x).
bool hasNoUnsignedWrap() const
bool isInBounds() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
Definition Operator.h:425
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
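A sketch of building an inbounds GEP and querying its indexed type; Ctx, P, and InsertPt (an LLVMContext, a pointer Value, and an insertion point) are assumed.

Type *I32 = Type::getInt32Ty(Ctx);
Type *ArrTy = ArrayType::get(I32, 4);                          // [4 x i32]
Value *Idxs[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, 2)};
Type *EltTy = GetElementPtrInst::getIndexedType(ArrTy, Idxs);  // i32
GetElementPtrInst *GEP =
    GetElementPtrInst::CreateInBounds(ArrTy, P, Idxs, "elt", InsertPt);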
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2036
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition IRBuilder.h:538
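A sketch of byte-offset pointer arithmetic through the builder; Builder and BasePtr (an IRBuilder<> and a pointer-typed Value) are assumed.

// Equivalent to "getelementptr inbounds i8, ptr %BasePtr, i64 4".
Value *Off = Builder.getInt(APInt(64, 4));
Value *Adj =
    Builder.CreatePtrAdd(BasePtr, Off, "adj", GEPNoWrapFlags::inBounds());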
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
Definition IRBuilder.h:75
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2780
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has the property: shuffle(NewC, ShMask) = C. Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
SimplifyQuery SQ
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
TargetLibraryInfo & TLI
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
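A hypothetical helper in the style of the visit* routines above, showing how the combiner-aware RAUW is used: fold "sub X, X" to zero so every user of the replaced instruction is pushed back onto the worklist.

static Instruction *foldTrivialSub(InstCombiner &IC, BinaryOperator &I) {
  if (I.getOpcode() == Instruction::Sub &&
      I.getOperand(0) == I.getOperand(1))
    return IC.replaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  return nullptr;
}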
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
const DataLayout & DL
DomConditionCache DC
const bool MinimizeSize
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
AssumptionCache & AC
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
DominatorTree & DT
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
BuilderTy & Builder
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
The legacy pass manager's instcombine pass.
Definition InstCombine.h:68
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
bool isShift() const
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isIntDivRem() const
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause at index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause at index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1077
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1445
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1451
Tracking metadata reference owned by Metadata.
Definition Metadata.h:899
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Root of the metadata hierarchy.
Definition Metadata.h:63
Value * getLHS() const
Value * getRHS() const
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
MDNode * getScopeList() const
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition Operator.h:78
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition Operator.h:111
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition Operator.h:105
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static LLVM_ABI PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
Definition Constants.h:1468
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
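A hypothetical run() body showing the usual preservation pattern; MyPass and doWork are illustrative names only.

PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
  if (!doWork(F))
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();   // the pass changed code but not the CFG
  return PA;
}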
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Definition Registry.h:44
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:168
This instruction constructs a fixed permutation of two input vectors.
size_type size() const
Definition SmallPtrSet.h:99
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:356
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition SmallSet.h:133
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition SmallSet.h:181
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Multiway switch.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
Definition Type.cpp:62
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:267
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
Definition Type.cpp:294
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:107
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Use * op_iterator
Definition User.h:279
op_range operands()
Definition User.h:292
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Definition User.cpp:21
op_iterator op_begin()
Definition User.h:284
const Use & getOperandUse(unsigned i) const
Definition User.h:245
Value * getOperand(unsigned i) const
Definition User.h:232
unsigned getNumOperands() const
Definition User.h:254
op_iterator op_end()
Definition User.h:286
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
Definition User.cpp:115
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition Value.h:759
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
Definition Value.cpp:166
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
iterator_range< user_iterator > users()
Definition Value.h:426
bool hasUseList() const
Check if this Value has a use-list.
Definition Value.h:344
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition Value.cpp:150
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:701
bool use_empty() const
Definition Value.h:346
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1101
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition Value.cpp:881
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
Value handle that is nullable, but tries to track the Value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
reverse_self_iterator getReverseIterator()
Definition ilist_node.h:137
self_iterator getIterator()
Definition ilist_node.h:134
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
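A sketch of the PatternMatch idiom used throughout this file: match "add X, C" and bind its pieces; V is assumed to be an arbitrary Value*.

using namespace llvm::PatternMatch;
Value *X;
const APInt *C;
if (match(V, m_Add(m_Value(X), m_APInt(C)))) {
  // X is the non-constant operand; *C is the constant addend
  // (possibly taken from a splat vector).
}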
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:330
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:843
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2060
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
Definition Local.cpp:2485
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool succ_empty(const Instruction *I)
Definition CFG.h:256
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2474
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition Utils.cpp:1723
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2138
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:646
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
Definition APFloat.h:1555
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
Definition Local.cpp:2468
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most, stopping at the first 1.
Definition bit.h:157
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
constexpr bool has_single_bit(T Value) noexcept
Definition bit.h:147
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1734
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:402
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
Definition Local.cpp:22
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Definition STLExtras.h:420
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
Definition Local.cpp:1795
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg....
Definition Local.cpp:1662
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
Definition Local.cpp:2037
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition Local.cpp:2414
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:337
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
TargetTransformInfo TTI
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Like isSafeToSpeculativelyExecute, but without using information from the instruction's non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition STLExtras.h:1963
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1899
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Definition STLExtras.h:2090
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:180
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
#define N
static constexpr roundingMode rmNearestTiesToEven
Definition APFloat.h:304
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Definition APFloat.cpp:324
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition KnownBits.h:244
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:241
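A sketch of querying value-tracking results; V, I, and IC (a scalar integer Value, the instruction being visited, and the surrounding InstCombiner) are assumed to be in scope.

KnownBits Known(V->getType()->getScalarSizeInBits());
IC.computeKnownBits(V, Known, &I);
if (Known.countMinLeadingZeros() >= Known.getBitWidth() / 2) {
  // The upper half of V's bits are known to be zero.
}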
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SimplifyQuery getWithInstruction(const Instruction *I) const