LLVM 19.0.0git
InstCombineInternal.h
Go to the documentation of this file.
1//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10///
11/// This file provides internal interfaces used to implement the InstCombine.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
16#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
17
18#include "llvm/ADT/Statistic.h"
23#include "llvm/IR/IRBuilder.h"
24#include "llvm/IR/InstVisitor.h"
26#include "llvm/IR/Value.h"
27#include "llvm/Support/Debug.h"
31#include <cassert>
32
33#define DEBUG_TYPE "instcombine"
35
36using namespace llvm::PatternMatch;
37
38// As a default, let's assume that we want to be aggressive,
39// and attempt to traverse with no limits in attempt to sink negation.
40static constexpr unsigned NegatorDefaultMaxDepth = ~0U;
41
42// Let's guesstimate that most often we will end up visiting/producing
43// fairly small number of new instructions.
44static constexpr unsigned NegatorMaxNodesSSO = 16;
45
46namespace llvm {
47
48class AAResults;
49class APInt;
50class AssumptionCache;
52class DataLayout;
53class DominatorTree;
54class GEPOperator;
55class GlobalVariable;
56class LoopInfo;
60class User;
61
63 : public InstCombiner,
64 public InstVisitor<InstCombinerImpl, Instruction *> {
65public:
67 bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
71 const DataLayout &DL, LoopInfo *LI)
72 : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT, ORE,
73 BFI, PSI, DL, LI) {}
74
75 virtual ~InstCombinerImpl() = default;
76
77 /// Perform early cleanup and prepare the InstCombine worklist.
78 bool prepareWorklist(Function &F,
80
81 /// Run the combiner over the entire worklist until it is empty.
82 ///
83 /// \returns true if the IR is changed.
84 bool run();
85
86 // Visitation implementation - Implement instruction combining for different
87 // instruction types. The semantics are as follows:
88 // Return Value:
89 // null - No change was made
90 // I - Change was made, I is still valid, I may be dead though
91 // otherwise - Change was made, replace I with returned instruction
92 //
93 Instruction *visitFNeg(UnaryOperator &I);
94 Instruction *visitAdd(BinaryOperator &I);
95 Instruction *visitFAdd(BinaryOperator &I);
96 Value *OptimizePointerDifference(
97 Value *LHS, Value *RHS, Type *Ty, bool isNUW);
98 Instruction *visitSub(BinaryOperator &I);
99 Instruction *visitFSub(BinaryOperator &I);
100 Instruction *visitMul(BinaryOperator &I);
101 Instruction *foldPowiReassoc(BinaryOperator &I);
102 Instruction *foldFMulReassoc(BinaryOperator &I);
103 Instruction *visitFMul(BinaryOperator &I);
104 Instruction *visitURem(BinaryOperator &I);
105 Instruction *visitSRem(BinaryOperator &I);
106 Instruction *visitFRem(BinaryOperator &I);
107 bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
108 Instruction *commonIRemTransforms(BinaryOperator &I);
109 Instruction *commonIDivTransforms(BinaryOperator &I);
110 Instruction *visitUDiv(BinaryOperator &I);
111 Instruction *visitSDiv(BinaryOperator &I);
112 Instruction *visitFDiv(BinaryOperator &I);
113 Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
114 Instruction *visitAnd(BinaryOperator &I);
115 Instruction *visitOr(BinaryOperator &I);
116 bool sinkNotIntoLogicalOp(Instruction &I);
117 bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I);
118 Instruction *visitXor(BinaryOperator &I);
119 Instruction *visitShl(BinaryOperator &I);
120 Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
121 BinaryOperator *Sh0, const SimplifyQuery &SQ,
122 bool AnalyzeForSignBitExtraction = false);
123 Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
125 Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
126 BinaryOperator &OldAShr);
127 Instruction *visitAShr(BinaryOperator &I);
128 Instruction *visitLShr(BinaryOperator &I);
129 Instruction *commonShiftTransforms(BinaryOperator &I);
130 Instruction *visitFCmpInst(FCmpInst &I);
131 CmpInst *canonicalizeICmpPredicate(CmpInst &I);
132 Instruction *visitICmpInst(ICmpInst &I);
133 Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
135 Instruction *commonCastTransforms(CastInst &CI);
136 Instruction *visitTrunc(TruncInst &CI);
137 Instruction *visitZExt(ZExtInst &Zext);
138 Instruction *visitSExt(SExtInst &Sext);
139 Instruction *visitFPTrunc(FPTruncInst &CI);
140 Instruction *visitFPExt(CastInst &CI);
141 Instruction *visitFPToUI(FPToUIInst &FI);
142 Instruction *visitFPToSI(FPToSIInst &FI);
143 Instruction *visitUIToFP(CastInst &CI);
144 Instruction *visitSIToFP(CastInst &CI);
145 Instruction *visitPtrToInt(PtrToIntInst &CI);
146 Instruction *visitIntToPtr(IntToPtrInst &CI);
147 Instruction *visitBitCast(BitCastInst &CI);
148 Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
149 Instruction *foldItoFPtoI(CastInst &FI);
151 Instruction *visitCallInst(CallInst &CI);
152 Instruction *visitInvokeInst(InvokeInst &II);
153 Instruction *visitCallBrInst(CallBrInst &CBI);
154
155 Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
156 Instruction *visitPHINode(PHINode &PN);
157 Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
158 Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
159 Instruction *visitAllocaInst(AllocaInst &AI);
160 Instruction *visitAllocSite(Instruction &FI);
161 Instruction *visitFree(CallInst &FI, Value *FreedOp);
162 Instruction *visitLoadInst(LoadInst &LI);
163 Instruction *visitStoreInst(StoreInst &SI);
164 Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
165 Instruction *visitUnconditionalBranchInst(BranchInst &BI);
166 Instruction *visitBranchInst(BranchInst &BI);
167 Instruction *visitFenceInst(FenceInst &FI);
168 Instruction *visitSwitchInst(SwitchInst &SI);
169 Instruction *visitReturnInst(ReturnInst &RI);
170 Instruction *visitUnreachableInst(UnreachableInst &I);
172 foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
173 Instruction *visitInsertValueInst(InsertValueInst &IV);
174 Instruction *visitInsertElementInst(InsertElementInst &IE);
175 Instruction *visitExtractElementInst(ExtractElementInst &EI);
176 Instruction *simplifyBinOpSplats(ShuffleVectorInst &SVI);
177 Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
178 Instruction *visitExtractValueInst(ExtractValueInst &EV);
179 Instruction *visitLandingPadInst(LandingPadInst &LI);
180 Instruction *visitVAEndInst(VAEndInst &I);
181 Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
182 bool freezeOtherUses(FreezeInst &FI);
183 Instruction *foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN);
184 Instruction *visitFreeze(FreezeInst &I);
185
186 /// Specify what to return for unhandled instructions.
188
189 /// True when DB dominates all uses of DI except UI.
190 /// UI must be in the same block as DI.
191 /// The routine checks that the DI parent and DB are different.
192 bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
193 const BasicBlock *DB) const;
194
195 /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
196 bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
197 const unsigned SIOpd);
198
199 LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
200 const Twine &Suffix = "");
201
203 FPClassTest Interested = fcAllFlags,
204 const Instruction *CtxI = nullptr,
205 unsigned Depth = 0) const {
207 Val, FMF, Interested, Depth,
208 getSimplifyQuery().getWithInstruction(CtxI));
209 }
210
212 FPClassTest Interested = fcAllFlags,
213 const Instruction *CtxI = nullptr,
214 unsigned Depth = 0) const {
216 Val, Interested, Depth, getSimplifyQuery().getWithInstruction(CtxI));
217 }
218
219 /// Check if fmul \p MulVal, +0.0 will yield +0.0 (or signed zero is
220 /// ignorable).
222 const Instruction *CtxI) const;
223
224 Constant *getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp) {
225 Constant *TruncC = ConstantExpr::getTrunc(C, TruncTy);
226 Constant *ExtTruncC =
227 ConstantFoldCastOperand(ExtOp, TruncC, C->getType(), DL);
228 if (ExtTruncC && ExtTruncC == C)
229 return TruncC;
230 return nullptr;
231 }
232
234 return getLosslessTrunc(C, TruncTy, Instruction::ZExt);
235 }
236
238 return getLosslessTrunc(C, TruncTy, Instruction::SExt);
239 }
240
241 std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
242 convertOrOfShiftsToFunnelShift(Instruction &Or);
243
244private:
245 bool annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
246 bool isDesirableIntType(unsigned BitWidth) const;
247 bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
248 bool shouldChangeType(Type *From, Type *To) const;
249 Value *dyn_castNegVal(Value *V) const;
250
251 /// Classify whether a cast is worth optimizing.
252 ///
253 /// This is a helper to decide whether the simplification of
254 /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
255 ///
256 /// \param CI The cast we are interested in.
257 ///
258 /// \return true if this cast actually results in any code being generated and
259 /// if it cannot already be eliminated by some other transformation.
260 bool shouldOptimizeCast(CastInst *CI);
261
262 /// Try to optimize a sequence of instructions checking if an operation
263 /// on LHS and RHS overflows.
264 ///
265 /// If this overflow check is done via one of the overflow check intrinsics,
266 /// then CtxI has to be the call instruction calling that intrinsic. If this
267 /// overflow check is done by arithmetic followed by a compare, then CtxI has
268 /// to be the arithmetic instruction.
269 ///
270 /// If a simplification is possible, stores the simplified result of the
271 /// operation in OperationResult and result of the overflow check in
272 /// OverflowResult, and return true. If no simplification is possible,
273 /// returns false.
274 bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
275 Value *LHS, Value *RHS,
276 Instruction &CtxI, Value *&OperationResult,
278
279 Instruction *visitCallBase(CallBase &Call);
280 Instruction *tryOptimizeCall(CallInst *CI);
281 bool transformConstExprCastCall(CallBase &Call);
282 Instruction *transformCallThroughTrampoline(CallBase &Call,
283 IntrinsicInst &Tramp);
284
285 // Return (a, b) if (LHS, RHS) is known to be (a, b) or (b, a).
286 // Otherwise, return std::nullopt
287 // Currently it matches:
288 // - LHS = (select c, a, b), RHS = (select c, b, a)
289 // - LHS = (phi [a, BB0], [b, BB1]), RHS = (phi [b, BB0], [a, BB1])
290 // - LHS = min(a, b), RHS = max(a, b)
291 std::optional<std::pair<Value *, Value *>> matchSymmetricPair(Value *LHS,
292 Value *RHS);
293
294 Value *simplifyMaskedLoad(IntrinsicInst &II);
295 Instruction *simplifyMaskedStore(IntrinsicInst &II);
296 Instruction *simplifyMaskedGather(IntrinsicInst &II);
297 Instruction *simplifyMaskedScatter(IntrinsicInst &II);
298
299 /// Transform (zext icmp) to bitwise / integer operations in order to
300 /// eliminate it.
301 ///
302 /// \param ICI The icmp of the (zext icmp) pair we are interested in.
 303 /// \param CI The zext of the (zext icmp) pair we are interested in.
304 ///
305 /// \return null if the transformation cannot be performed. If the
306 /// transformation can be performed the new instruction that replaces the
307 /// (zext icmp) pair will be returned.
308 Instruction *transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext);
309
310 Instruction *transformSExtICmp(ICmpInst *Cmp, SExtInst &Sext);
311
312 bool willNotOverflowSignedAdd(const WithCache<const Value *> &LHS,
314 const Instruction &CxtI) const {
315 return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
316 OverflowResult::NeverOverflows;
317 }
318
319 bool willNotOverflowUnsignedAdd(const WithCache<const Value *> &LHS,
321 const Instruction &CxtI) const {
322 return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
323 OverflowResult::NeverOverflows;
324 }
325
326 bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
327 const Instruction &CxtI, bool IsSigned) const {
328 return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
329 : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
330 }
331
332 bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
333 const Instruction &CxtI) const {
334 return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
335 OverflowResult::NeverOverflows;
336 }
337
338 bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
339 const Instruction &CxtI) const {
340 return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
341 OverflowResult::NeverOverflows;
342 }
343
344 bool willNotOverflowSub(const Value *LHS, const Value *RHS,
345 const Instruction &CxtI, bool IsSigned) const {
346 return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
347 : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
348 }
349
350 bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
351 const Instruction &CxtI) const {
352 return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
353 OverflowResult::NeverOverflows;
354 }
355
356 bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
357 const Instruction &CxtI) const {
358 return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
359 OverflowResult::NeverOverflows;
360 }
361
362 bool willNotOverflowMul(const Value *LHS, const Value *RHS,
363 const Instruction &CxtI, bool IsSigned) const {
364 return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
365 : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
366 }
367
369 const Value *RHS, const Instruction &CxtI,
370 bool IsSigned) const {
371 switch (Opcode) {
372 case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
373 case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
374 case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
375 default: llvm_unreachable("Unexpected opcode for overflow query");
376 }
377 }
378
379 Value *EmitGEPOffset(User *GEP);
380 Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
381 Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
382 Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
383 Instruction *foldFBinOpOfIntCasts(BinaryOperator &I);
384 // Should only be called by `foldFBinOpOfIntCasts`.
385 Instruction *foldFBinOpOfIntCastsFromSign(
386 BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
388 Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
389 Instruction *narrowBinOp(TruncInst &Trunc);
390 Instruction *narrowMaskedBinOp(BinaryOperator &And);
391 Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
392 Instruction *narrowFunnelShift(TruncInst &Trunc);
393 Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
394 Instruction *matchSAddSubSat(IntrinsicInst &MinMax1);
395 Instruction *foldNot(BinaryOperator &I);
396 Instruction *foldBinOpOfDisplacedShifts(BinaryOperator &I);
397
398 /// Determine if a pair of casts can be replaced by a single cast.
399 ///
400 /// \param CI1 The first of a pair of casts.
401 /// \param CI2 The second of a pair of casts.
402 ///
403 /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
404 /// Instruction::CastOps value for a cast that can replace the pair, casting
405 /// CI1->getSrcTy() to CI2->getDstTy().
406 ///
407 /// \see CastInst::isEliminableCastPair
408 Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
409 const CastInst *CI2);
410 Value *simplifyIntToPtrRoundTripCast(Value *Val);
411
412 Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &I,
413 bool IsAnd, bool IsLogical = false);
414 Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);
415
416 Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd);
417
418 Value *foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1, ICmpInst *ICmp2,
419 bool IsAnd);
420
421 /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
422 /// NOTE: Unlike most of instcombine, this returns a Value which should
423 /// already be inserted into the function.
424 Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd,
425 bool IsLogicalSelect = false);
426
427 Instruction *foldLogicOfIsFPClass(BinaryOperator &Operator, Value *LHS,
428 Value *RHS);
429
431 canonicalizeConditionalNegationViaMathToSelect(BinaryOperator &i);
432
433 Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
434 Instruction *CxtI, bool IsAnd,
435 bool IsLogical = false);
436 Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D,
437 bool InvertFalseVal = false);
438 Value *getSelectCondition(Value *A, Value *B, bool ABIsTheSame);
439
440 Instruction *foldLShrOverflowBit(BinaryOperator &I);
441 Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
442 Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
443 Instruction *foldIntrinsicIsFPClass(IntrinsicInst &II);
444 Instruction *foldFPSignBitOps(BinaryOperator &I);
445 Instruction *foldFDivConstantDivisor(BinaryOperator &I);
446
447 // Optimize one of these forms:
448 // and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
449 // or i1 Op, SI / select i1 Op, i1 true, i1 SI (if IsAnd = false)
 450 // into simpler select instruction using isImpliedCondition.
451 Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
452 bool IsAnd);
453
454 Instruction *hoistFNegAboveFMulFDiv(Value *FNegOp, Instruction &FMFSource);
455
456public:
457 /// Create and insert the idiom we use to indicate a block is unreachable
458 /// without having to rewrite the CFG from within InstCombine.
460 auto &Ctx = InsertAt->getContext();
461 auto *SI = new StoreInst(ConstantInt::getTrue(Ctx),
462 PoisonValue::get(PointerType::getUnqual(Ctx)),
463 /*isVolatile*/ false, Align(1));
464 InsertNewInstBefore(SI, InsertAt->getIterator());
465 }
466
467 /// Combiner aware instruction erasure.
468 ///
469 /// When dealing with an instruction that has side effects or produces a void
470 /// value, we can't rely on DCE to delete the instruction. Instead, visit
471 /// methods should return the value returned by this function.
473 LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
474 assert(I.use_empty() && "Cannot erase instruction that is used!");
476
477 // Make sure that we reprocess all operands now that we reduced their
478 // use counts.
479 SmallVector<Value *> Ops(I.operands());
480 Worklist.remove(&I);
481 DC.removeValue(&I);
482 I.eraseFromParent();
483 for (Value *Op : Ops)
484 Worklist.handleUseCountDecrement(Op);
485 MadeIRChange = true;
486 return nullptr; // Don't do anything with FI
487 }
488
489 OverflowResult computeOverflow(
490 Instruction::BinaryOps BinaryOp, bool IsSigned,
491 Value *LHS, Value *RHS, Instruction *CxtI) const;
492
493 /// Performs a few simplifications for operators which are associative
494 /// or commutative.
495 bool SimplifyAssociativeOrCommutative(BinaryOperator &I);
496
497 /// Tries to simplify binary operations which some other binary
498 /// operation distributes over.
499 ///
500 /// It does this by either by factorizing out common terms (eg "(A*B)+(A*C)"
501 /// -> "A*(B+C)") or expanding out if this results in simplifications (eg: "A
502 /// & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the simplified
503 /// value, or null if it didn't simplify.
504 Value *foldUsingDistributiveLaws(BinaryOperator &I);
505
506 /// Tries to simplify add operations using the definition of remainder.
507 ///
508 /// The definition of remainder is X % C = X - (X / C ) * C. The add
509 /// expression X % C0 + (( X / C0 ) % C1) * C0 can be simplified to
510 /// X % (C0 * C1)
511 Value *SimplifyAddWithRemainder(BinaryOperator &I);
512
513 // Binary Op helper for select operations where the expression can be
514 // efficiently reorganized.
515 Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
516 Value *RHS);
517
518 // If `I` has operand `(ctpop (not x))`, fold `I` with `(sub nuw nsw
519 // BitWidth(x), (ctpop x))`.
520 Instruction *tryFoldInstWithCtpopWithNot(Instruction *I);
521
522 // (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
523 // -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
524 // (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
525 // -> (BinOp (logic_shift (BinOp X, Y)), Mask)
526 Instruction *foldBinOpShiftWithShift(BinaryOperator &I);
527
528 /// Tries to simplify binops of select and cast of the select condition.
529 ///
530 /// (Binop (cast C), (select C, T, F))
531 /// -> (select C, C0, C1)
532 Instruction *foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I);
533
534 /// This tries to simplify binary operations by factorizing out common terms
535 /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
536 Value *tryFactorizationFolds(BinaryOperator &I);
537
538 /// Match a select chain which produces one of three values based on whether
539 /// the LHS is less than, equal to, or greater than RHS respectively.
540 /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
541 /// Equal and Greater values are saved in the matching process and returned to
542 /// the caller.
543 bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
544 ConstantInt *&Less, ConstantInt *&Equal,
545 ConstantInt *&Greater);
546
547 /// Attempts to replace V with a simpler value based on the demanded
548 /// bits.
549 Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
550 unsigned Depth, Instruction *CxtI);
551 bool SimplifyDemandedBits(Instruction *I, unsigned Op,
552 const APInt &DemandedMask, KnownBits &Known,
553 unsigned Depth = 0) override;
554
555 /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
556 /// bits. It also tries to handle simplifications that can be done based on
557 /// DemandedMask, but without modifying the Instruction.
558 Value *SimplifyMultipleUseDemandedBits(Instruction *I,
559 const APInt &DemandedMask,
560 KnownBits &Known,
561 unsigned Depth, Instruction *CxtI);
562
563 /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
564 /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
565 Value *simplifyShrShlDemandedBits(
566 Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
567 const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);
568
569 /// Tries to simplify operands to an integer instruction based on its
570 /// demanded bits.
571 bool SimplifyDemandedInstructionBits(Instruction &Inst);
572 bool SimplifyDemandedInstructionBits(Instruction &Inst, KnownBits &Known);
573
574 Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
575 APInt &PoisonElts, unsigned Depth = 0,
576 bool AllowMultipleUsers = false) override;
577
578 /// Attempts to replace V with a simpler value based on the demanded
579 /// floating-point classes
580 Value *SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask,
581 KnownFPClass &Known, unsigned Depth,
582 Instruction *CxtI);
583 bool SimplifyDemandedFPClass(Instruction *I, unsigned Op,
584 FPClassTest DemandedMask, KnownFPClass &Known,
585 unsigned Depth = 0);
586
587 /// Canonicalize the position of binops relative to shufflevector.
588 Instruction *foldVectorBinop(BinaryOperator &Inst);
590 Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf);
591
592 /// Given a binary operator, cast instruction, or select which has a PHI node
593 /// as operand #0, see if we can fold the instruction into the PHI (which is
594 /// only possible if all operands to the PHI are constants).
595 Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);
596
597 /// For a binary operator with 2 phi operands, try to hoist the binary
598 /// operation before the phi. This can result in fewer instructions in
599 /// patterns where at least one set of phi operands simplifies.
600 /// Example:
601 /// BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
602 /// -->
603 /// BB1: BO = binop X, Y
604 /// BB3: phi [BO, BB1], [(binop C1, C2), BB2]
605 Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);
606
607 /// Given an instruction with a select as one operand and a constant as the
608 /// other operand, try to fold the binary operator into the select arguments.
609 /// This also works for Cast instructions, which obviously do not have a
610 /// second operand.
611 Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
612 bool FoldWithMultiUse = false);
613
614 /// This is a convenience wrapper function for the above two functions.
615 Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);
616
617 Instruction *foldAddWithConstant(BinaryOperator &Add);
618
619 Instruction *foldSquareSumInt(BinaryOperator &I);
620 Instruction *foldSquareSumFP(BinaryOperator &I);
621
622 /// Try to rotate an operation below a PHI node, using PHI nodes for
623 /// its operands.
624 Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
625 Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
626 Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
627 Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
628 Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
629 Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
630 Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);
631 Instruction *foldPHIArgIntToPtrToPHI(PHINode &PN);
632
633 /// If an integer typed PHI has only one use which is an IntToPtr operation,
634 /// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
635 /// insert a new pointer typed PHI and replace the original one.
636 bool foldIntegerTypedPHI(PHINode &PN);
637
638 /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
639 /// folded operation.
640 void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);
641
642 Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
644 Instruction *foldSelectICmp(ICmpInst::Predicate Pred, SelectInst *SI,
645 Value *RHS, const ICmpInst &I);
646 bool foldAllocaCmp(AllocaInst *Alloca);
647 Instruction *foldCmpLoadFromIndexedGlobal(LoadInst *LI,
649 GlobalVariable *GV, CmpInst &ICI,
650 ConstantInt *AndCst = nullptr);
651 Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
652 Constant *RHSC);
653 Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
655 Instruction *foldICmpWithCastOp(ICmpInst &ICmp);
656 Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp);
657
658 Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
660 Instruction *foldICmpWithConstant(ICmpInst &Cmp);
661 Instruction *foldICmpUsingBoolRange(ICmpInst &I);
662 Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
663 Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
664 Instruction *foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
665 const APInt &C);
666 Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
667 Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax,
668 Value *Z, ICmpInst::Predicate Pred);
669 Instruction *foldICmpEquality(ICmpInst &Cmp);
670 Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
671 Instruction *foldSignBitTest(ICmpInst &I);
672 Instruction *foldICmpWithZero(ICmpInst &Cmp);
673
674 Value *foldMultiplicationOverflowCheck(ICmpInst &Cmp);
675
676 Instruction *foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO,
677 const APInt &C);
678 Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
679 ConstantInt *C);
680 Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
681 const APInt &C);
682 Instruction *foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
683 const SimplifyQuery &Q);
684 Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
685 const APInt &C);
686 Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
687 const APInt &C);
688 Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
689 const APInt &C);
690 Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
691 const APInt &C);
692 Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
693 const APInt &C);
694 Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
695 const APInt &C);
696 Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
697 const APInt &C);
698 Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
699 const APInt &C);
700 Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
701 const APInt &C);
702 Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
703 const APInt &C);
704 Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
705 const APInt &C);
706 Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
707 const APInt &C1);
708 Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
709 const APInt &C1, const APInt &C2);
710 Instruction *foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor,
711 const APInt &C);
712 Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
713 const APInt &C2);
714 Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
715 const APInt &C2);
716
717 Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
718 BinaryOperator *BO,
719 const APInt &C);
720 Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
721 const APInt &C);
722 Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
723 const APInt &C);
724 Instruction *foldICmpBitCast(ICmpInst &Cmp);
725 Instruction *foldICmpWithTrunc(ICmpInst &Cmp);
726 Instruction *foldICmpCommutative(ICmpInst::Predicate Pred, Value *Op0,
727 Value *Op1, ICmpInst &CxtI);
728
729 // Helpers of visitSelectInst().
732 Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
733 Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
735 Value *A, Value *B, Instruction &Outer,
740 unsigned Depth = 0);
741
742 Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
743 bool isSigned, bool Inside);
744 bool mergeStoreIntoSuccessor(StoreInst &SI);
745
746 /// Given an initial instruction, check to see if it is the root of a
747 /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
748 /// intrinsic.
749 Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
750 bool MatchBitReversals);
751
752 Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
753 Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);
754
755 Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
756
757 bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock);
758 void tryToSinkInstructionDbgValues(
759 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
761 void tryToSinkInstructionDPValues(Instruction *I,
762 BasicBlock::iterator InsertPos,
763 BasicBlock *SrcBlock, BasicBlock *DestBlock,
765
766 bool removeInstructionsBeforeUnreachable(Instruction &I);
767 void addDeadEdge(BasicBlock *From, BasicBlock *To,
769 void handleUnreachableFrom(Instruction *I,
771 void handlePotentiallyDeadBlocks(SmallVectorImpl<BasicBlock *> &Worklist);
772 void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc);
773 void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser = nullptr);
774};
775
776class Negator final {
777 /// Top-to-bottom, def-to-use negated instruction tree we produced.
779
781 BuilderTy Builder;
782
783 const bool IsTrulyNegation;
784
785 SmallDenseMap<Value *, Value *> NegationsCache;
786
787 Negator(LLVMContext &C, const DataLayout &DL, bool IsTrulyNegation);
788
789#if LLVM_ENABLE_STATS
790 unsigned NumValuesVisitedInThisNegator = 0;
791 ~Negator();
792#endif
793
794 using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
795 Value * /*NegatedRoot*/>;
796
797 std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);
798
799 [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW, unsigned Depth);
800
801 [[nodiscard]] Value *negate(Value *V, bool IsNSW, unsigned Depth);
802
803 /// Recurse depth-first and attempt to sink the negation.
804 /// FIXME: use worklist?
805 [[nodiscard]] std::optional<Result> run(Value *Root, bool IsNSW);
806
807 Negator(const Negator &) = delete;
808 Negator(Negator &&) = delete;
809 Negator &operator=(const Negator &) = delete;
810 Negator &operator=(Negator &&) = delete;
811
812public:
 813 /// Attempt to negate \p Root. Returns nullptr if negation can't be performed,
814 /// otherwise returns negated value.
815 [[nodiscard]] static Value *Negate(bool LHSIsZero, bool IsNSW, Value *Root,
816 InstCombinerImpl &IC);
817};
818
819} // end namespace llvm
820
821#undef DEBUG_TYPE
822
823#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static bool foldICmpWithDominatingICmp(CmpInst *Cmp, const TargetLowering &TLI)
For pattern like:
#define LLVM_LIBRARY_VISIBILITY
Definition: Compiler.h:131
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Align
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
Hexagon Common GEP
IRTranslator LLVM IR MI
static constexpr unsigned NegatorMaxNodesSSO
static constexpr unsigned NegatorDefaultMaxDepth
This file provides the interface for the instcombine pass implementation.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
static OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const AddOperator *Add, const SimplifyQuery &SQ)
Value * RHS
Value * LHS
BinaryOperator * Mul
support::ulittle16_t & Lo
Definition: aarch32.cpp:206
support::ulittle16_t & Hi
Definition: aarch32.cpp:205
static const uint32_t IV[8]
Definition: blake3_impl.h:78
Class for arbitrary precision integers.
Definition: APInt.h:76
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
This class represents any memset intrinsic.
A cache of @llvm.assume calls within a function.
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:164
This class represents a no-op cast from one type to another.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1455
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:579
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:955
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:965
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
This instruction extracts a single (scalar) element from a VectorType value.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
An instruction for ordering other memory operations.
Definition: Instructions.h:460
This class represents a freeze function that returns random concrete value if an operand is either a ...
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
This instruction compares its operands according to the predicate given to the constructor.
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field of array element value into an aggregate value.
bool fmulByZeroIsZero(Value *MulVal, FastMathFlags FMF, const Instruction *CtxI) const
Check if fmul MulVal, +0.0 will yield +0.0 (or signed zero is ignorable).
virtual ~InstCombinerImpl()=default
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Instruction * foldVectorSelect(SelectInst &Sel)
Instruction * foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI)
Instruction * foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1, Value *A, Value *B, Instruction &Outer, SelectPatternFlavor SPF2, Value *C)
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
bool replaceInInstruction(Value *V, Value *Old, Value *New, unsigned Depth=0)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI)
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Instruction * visitInstruction(Instruction &I)
Specify what to return for unhandled instructions.
KnownFPClass computeKnownFPClass(Value *Val, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Constant * getLosslessSignedTrunc(Constant *C, Type *TruncTy)
InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder, bool MinimizeSize, AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Instruction * visitSelectInst(SelectInst &SI)
Instruction * foldSelectOfBools(SelectInst &SI)
Instruction * foldSelectExtConst(SelectInst &Sel)
The core instruction combiner logic.
Definition: InstCombiner.h:47
Base class for instruction visitors.
Definition: InstVisitor.h:78
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Definition: Instructions.h:184
This class represents min/max intrinsics.
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:31
The optimization diagnostic interface.
Analysis providing profile information.
This class represents a cast from a pointer to an integer.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
Multiway switch.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
This function has undefined behavior.
This represents the llvm.va_end intrinsic.
LLVM Value Representation.
Definition: Value.h:74
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
This class represents zero extension of integer types.
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
OverflowResult
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1581
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.