LLVM 19.0.0git
InstCombineInternal.h
Go to the documentation of this file.
1//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10///
11/// This file provides internal interfaces used to implement the InstCombine.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
16#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
17
18#include "llvm/ADT/Statistic.h"
23#include "llvm/IR/IRBuilder.h"
24#include "llvm/IR/InstVisitor.h"
26#include "llvm/IR/Value.h"
27#include "llvm/Support/Debug.h"
31#include <cassert>
32
33#define DEBUG_TYPE "instcombine"
35
36using namespace llvm::PatternMatch;
37
38// As a default, let's assume that we want to be aggressive,
39// and attempt to traverse with no limits in attempt to sink negation.
40static constexpr unsigned NegatorDefaultMaxDepth = ~0U;
41
42// Let's guesstimate that most often we will end up visiting/producing
43// fairly small number of new instructions.
44static constexpr unsigned NegatorMaxNodesSSO = 16;
45
46namespace llvm {
47
48class AAResults;
49class APInt;
50class AssumptionCache;
52class DataLayout;
53class DominatorTree;
54class GEPOperator;
55class GlobalVariable;
56class LoopInfo;
60class User;
61
63 : public InstCombiner,
64 public InstVisitor<InstCombinerImpl, Instruction *> {
65public:
67 bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
71 const DataLayout &DL, LoopInfo *LI)
72 : InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT, ORE,
73 BFI, PSI, DL, LI) {}
74
75 virtual ~InstCombinerImpl() = default;
76
77 /// Perform early cleanup and prepare the InstCombine worklist.
78 bool prepareWorklist(Function &F,
80
81 /// Run the combiner over the entire worklist until it is empty.
82 ///
83 /// \returns true if the IR is changed.
84 bool run();
85
86 // Visitation implementation - Implement instruction combining for different
87 // instruction types. The semantics are as follows:
88 // Return Value:
89 // null - No change was made
90 // I - Change was made, I is still valid, I may be dead though
91 // otherwise - Change was made, replace I with returned instruction
92 //
93 Instruction *visitFNeg(UnaryOperator &I);
94 Instruction *visitAdd(BinaryOperator &I);
95 Instruction *visitFAdd(BinaryOperator &I);
96 Value *OptimizePointerDifference(
97 Value *LHS, Value *RHS, Type *Ty, bool isNUW);
98 Instruction *visitSub(BinaryOperator &I);
99 Instruction *visitFSub(BinaryOperator &I);
100 Instruction *visitMul(BinaryOperator &I);
101 Instruction *foldFMulReassoc(BinaryOperator &I);
102 Instruction *visitFMul(BinaryOperator &I);
103 Instruction *visitURem(BinaryOperator &I);
104 Instruction *visitSRem(BinaryOperator &I);
105 Instruction *visitFRem(BinaryOperator &I);
106 bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
107 Instruction *commonIRemTransforms(BinaryOperator &I);
108 Instruction *commonIDivTransforms(BinaryOperator &I);
109 Instruction *visitUDiv(BinaryOperator &I);
110 Instruction *visitSDiv(BinaryOperator &I);
111 Instruction *visitFDiv(BinaryOperator &I);
112 Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
113 Instruction *visitAnd(BinaryOperator &I);
114 Instruction *visitOr(BinaryOperator &I);
115 bool sinkNotIntoLogicalOp(Instruction &I);
116 bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I);
117 Instruction *visitXor(BinaryOperator &I);
118 Instruction *visitShl(BinaryOperator &I);
119 Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
120 BinaryOperator *Sh0, const SimplifyQuery &SQ,
121 bool AnalyzeForSignBitExtraction = false);
122 Instruction *canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(
124 Instruction *foldVariableSignZeroExtensionOfVariableHighBitExtract(
125 BinaryOperator &OldAShr);
126 Instruction *visitAShr(BinaryOperator &I);
127 Instruction *visitLShr(BinaryOperator &I);
128 Instruction *commonShiftTransforms(BinaryOperator &I);
129 Instruction *visitFCmpInst(FCmpInst &I);
130 CmpInst *canonicalizeICmpPredicate(CmpInst &I);
131 Instruction *visitICmpInst(ICmpInst &I);
132 Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
134 Instruction *commonCastTransforms(CastInst &CI);
135 Instruction *visitTrunc(TruncInst &CI);
136 Instruction *visitZExt(ZExtInst &Zext);
137 Instruction *visitSExt(SExtInst &Sext);
138 Instruction *visitFPTrunc(FPTruncInst &CI);
139 Instruction *visitFPExt(CastInst &CI);
140 Instruction *visitFPToUI(FPToUIInst &FI);
141 Instruction *visitFPToSI(FPToSIInst &FI);
142 Instruction *visitUIToFP(CastInst &CI);
143 Instruction *visitSIToFP(CastInst &CI);
144 Instruction *visitPtrToInt(PtrToIntInst &CI);
145 Instruction *visitIntToPtr(IntToPtrInst &CI);
146 Instruction *visitBitCast(BitCastInst &CI);
147 Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
148 Instruction *foldItoFPtoI(CastInst &FI);
150 Instruction *visitCallInst(CallInst &CI);
151 Instruction *visitInvokeInst(InvokeInst &II);
152 Instruction *visitCallBrInst(CallBrInst &CBI);
153
154 Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
155 Instruction *visitPHINode(PHINode &PN);
156 Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
157 Instruction *visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src);
158 Instruction *visitAllocaInst(AllocaInst &AI);
159 Instruction *visitAllocSite(Instruction &FI);
160 Instruction *visitFree(CallInst &FI, Value *FreedOp);
161 Instruction *visitLoadInst(LoadInst &LI);
162 Instruction *visitStoreInst(StoreInst &SI);
163 Instruction *visitAtomicRMWInst(AtomicRMWInst &SI);
164 Instruction *visitUnconditionalBranchInst(BranchInst &BI);
165 Instruction *visitBranchInst(BranchInst &BI);
166 Instruction *visitFenceInst(FenceInst &FI);
167 Instruction *visitSwitchInst(SwitchInst &SI);
168 Instruction *visitReturnInst(ReturnInst &RI);
169 Instruction *visitUnreachableInst(UnreachableInst &I);
171 foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI);
172 Instruction *visitInsertValueInst(InsertValueInst &IV);
173 Instruction *visitInsertElementInst(InsertElementInst &IE);
174 Instruction *visitExtractElementInst(ExtractElementInst &EI);
175 Instruction *simplifyBinOpSplats(ShuffleVectorInst &SVI);
176 Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
177 Instruction *visitExtractValueInst(ExtractValueInst &EV);
178 Instruction *visitLandingPadInst(LandingPadInst &LI);
179 Instruction *visitVAEndInst(VAEndInst &I);
180 Value *pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI);
181 bool freezeOtherUses(FreezeInst &FI);
182 Instruction *foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN);
183 Instruction *visitFreeze(FreezeInst &I);
184
185 /// Specify what to return for unhandled instructions.
187
188 /// True when DB dominates all uses of DI except UI.
189 /// UI must be in the same block as DI.
190 /// The routine checks that the DI parent and DB are different.
191 bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
192 const BasicBlock *DB) const;
193
194 /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
195 bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
196 const unsigned SIOpd);
197
198 LoadInst *combineLoadToNewType(LoadInst &LI, Type *NewTy,
199 const Twine &Suffix = "");
200
202 FPClassTest Interested = fcAllFlags,
203 const Instruction *CtxI = nullptr,
204 unsigned Depth = 0) const {
206 Val, FMF, Interested, Depth,
207 getSimplifyQuery().getWithInstruction(CtxI));
208 }
209
211 FPClassTest Interested = fcAllFlags,
212 const Instruction *CtxI = nullptr,
213 unsigned Depth = 0) const {
215 Val, Interested, Depth, getSimplifyQuery().getWithInstruction(CtxI));
216 }
217
218 /// Check if fmul \p MulVal, +0.0 will yield +0.0 (or signed zero is
219 /// ignorable).
221 const Instruction *CtxI) const;
222
223 Constant *getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp) {
224 Constant *TruncC = ConstantExpr::getTrunc(C, TruncTy);
225 Constant *ExtTruncC =
226 ConstantFoldCastOperand(ExtOp, TruncC, C->getType(), DL);
227 if (ExtTruncC && ExtTruncC == C)
228 return TruncC;
229 return nullptr;
230 }
231
233 return getLosslessTrunc(C, TruncTy, Instruction::ZExt);
234 }
235
237 return getLosslessTrunc(C, TruncTy, Instruction::SExt);
238 }
239
240 std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
241 convertOrOfShiftsToFunnelShift(Instruction &Or);
242
243private:
244 bool annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI);
245 bool isDesirableIntType(unsigned BitWidth) const;
246 bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
247 bool shouldChangeType(Type *From, Type *To) const;
248 Value *dyn_castNegVal(Value *V) const;
249
250 /// Classify whether a cast is worth optimizing.
251 ///
252 /// This is a helper to decide whether the simplification of
253 /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
254 ///
255 /// \param CI The cast we are interested in.
256 ///
257 /// \return true if this cast actually results in any code being generated and
258 /// if it cannot already be eliminated by some other transformation.
259 bool shouldOptimizeCast(CastInst *CI);
260
261 /// Try to optimize a sequence of instructions checking if an operation
262 /// on LHS and RHS overflows.
263 ///
264 /// If this overflow check is done via one of the overflow check intrinsics,
265 /// then CtxI has to be the call instruction calling that intrinsic. If this
266 /// overflow check is done by arithmetic followed by a compare, then CtxI has
267 /// to be the arithmetic instruction.
268 ///
269 /// If a simplification is possible, stores the simplified result of the
270 /// operation in OperationResult and result of the overflow check in
271 /// OverflowResult, and return true. If no simplification is possible,
272 /// returns false.
273 bool OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned,
274 Value *LHS, Value *RHS,
275 Instruction &CtxI, Value *&OperationResult,
277
278 Instruction *visitCallBase(CallBase &Call);
279 Instruction *tryOptimizeCall(CallInst *CI);
280 bool transformConstExprCastCall(CallBase &Call);
281 Instruction *transformCallThroughTrampoline(CallBase &Call,
282 IntrinsicInst &Tramp);
283
284 // Return (a, b) if (LHS, RHS) is known to be (a, b) or (b, a).
285 // Otherwise, return std::nullopt
286 // Currently it matches:
287 // - LHS = (select c, a, b), RHS = (select c, b, a)
288 // - LHS = (phi [a, BB0], [b, BB1]), RHS = (phi [b, BB0], [a, BB1])
289 // - LHS = min(a, b), RHS = max(a, b)
290 std::optional<std::pair<Value *, Value *>> matchSymmetricPair(Value *LHS,
291 Value *RHS);
292
293 Value *simplifyMaskedLoad(IntrinsicInst &II);
294 Instruction *simplifyMaskedStore(IntrinsicInst &II);
295 Instruction *simplifyMaskedGather(IntrinsicInst &II);
296 Instruction *simplifyMaskedScatter(IntrinsicInst &II);
297
298 /// Transform (zext icmp) to bitwise / integer operations in order to
299 /// eliminate it.
300 ///
301 /// \param ICI The icmp of the (zext icmp) pair we are interested in.
302 /// \param CI The zext of the (zext icmp) pair we are interested in.
303 ///
304 /// \return null if the transformation cannot be performed. If the
305 /// transformation can be performed the new instruction that replaces the
306 /// (zext icmp) pair will be returned.
307 Instruction *transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext);
308
309 Instruction *transformSExtICmp(ICmpInst *Cmp, SExtInst &Sext);
310
// NOTE(review): doxygen-scrape artifact — original source line 312 is missing
// from this extraction (presumably the `RHS` parameter declaration, which the
// body references); restore from the upstream InstCombineInternal.h.
/// Returns true iff the signed addition LHS + RHS is proven never to
/// overflow at the context instruction \p CxtI.
311 bool willNotOverflowSignedAdd(const WithCache<const Value *> &LHS,
313 const Instruction &CxtI) const {
314 return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
315 OverflowResult::NeverOverflows;
316 }
317
// NOTE(review): doxygen-scrape artifact — original source line 319 is missing
// from this extraction (presumably the `RHS` parameter declaration, which the
// body references); restore from the upstream InstCombineInternal.h.
/// Returns true iff the unsigned addition LHS + RHS is proven never to
/// wrap at the context instruction \p CxtI.
318 bool willNotOverflowUnsignedAdd(const WithCache<const Value *> &LHS,
320 const Instruction &CxtI) const {
321 return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
322 OverflowResult::NeverOverflows;
323 }
324
325 bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
326 const Instruction &CxtI, bool IsSigned) const {
327 return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
328 : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
329 }
330
331 bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
332 const Instruction &CxtI) const {
333 return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
334 OverflowResult::NeverOverflows;
335 }
336
337 bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
338 const Instruction &CxtI) const {
339 return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
340 OverflowResult::NeverOverflows;
341 }
342
343 bool willNotOverflowSub(const Value *LHS, const Value *RHS,
344 const Instruction &CxtI, bool IsSigned) const {
345 return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
346 : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
347 }
348
349 bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
350 const Instruction &CxtI) const {
351 return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
352 OverflowResult::NeverOverflows;
353 }
354
355 bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
356 const Instruction &CxtI) const {
357 return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
358 OverflowResult::NeverOverflows;
359 }
360
361 bool willNotOverflowMul(const Value *LHS, const Value *RHS,
362 const Instruction &CxtI, bool IsSigned) const {
363 return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
364 : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
365 }
366
// NOTE(review): doxygen-scrape artifact — original source line 367 (the start
// of this declaration, presumably
// `bool willNotOverflow(Instruction::BinaryOps Opcode, const Value *LHS,`)
// is missing from this extraction; restore from upstream InstCombineInternal.h.
368 const Value *RHS, const Instruction &CxtI,
369 bool IsSigned) const {
// Dispatch on the binary opcode; only Add/Sub/Mul have overflow queries, and
// any other opcode is a programming error (llvm_unreachable).
370 switch (Opcode) {
371 case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
372 case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
373 case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
374 default: llvm_unreachable("Unexpected opcode for overflow query");
375 }
376 }
377
378 Value *EmitGEPOffset(User *GEP);
379 Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
380 Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
381 Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
382 Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
383 Instruction *narrowBinOp(TruncInst &Trunc);
384 Instruction *narrowMaskedBinOp(BinaryOperator &And);
385 Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
386 Instruction *narrowFunnelShift(TruncInst &Trunc);
387 Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);
388 Instruction *matchSAddSubSat(IntrinsicInst &MinMax1);
389 Instruction *foldNot(BinaryOperator &I);
390 Instruction *foldBinOpOfDisplacedShifts(BinaryOperator &I);
391
392 /// Determine if a pair of casts can be replaced by a single cast.
393 ///
394 /// \param CI1 The first of a pair of casts.
395 /// \param CI2 The second of a pair of casts.
396 ///
397 /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
398 /// Instruction::CastOps value for a cast that can replace the pair, casting
399 /// CI1->getSrcTy() to CI2->getDstTy().
400 ///
401 /// \see CastInst::isEliminableCastPair
402 Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
403 const CastInst *CI2);
404 Value *simplifyIntToPtrRoundTripCast(Value *Val);
405
406 Value *foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &I,
407 bool IsAnd, bool IsLogical = false);
408 Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, BinaryOperator &Xor);
409
410 Value *foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd);
411
412 Value *foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1, ICmpInst *ICmp2,
413 bool IsAnd);
414
415 /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
416 /// NOTE: Unlike most of instcombine, this returns a Value which should
417 /// already be inserted into the function.
418 Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd,
419 bool IsLogicalSelect = false);
420
421 Instruction *foldLogicOfIsFPClass(BinaryOperator &Operator, Value *LHS,
422 Value *RHS);
423
425 canonicalizeConditionalNegationViaMathToSelect(BinaryOperator &i);
426
427 Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
428 Instruction *CxtI, bool IsAnd,
429 bool IsLogical = false);
430 Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D,
431 bool InvertFalseVal = false);
432 Value *getSelectCondition(Value *A, Value *B, bool ABIsTheSame);
433
434 Instruction *foldLShrOverflowBit(BinaryOperator &I);
435 Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
436 Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
437 Instruction *foldIntrinsicIsFPClass(IntrinsicInst &II);
438 Instruction *foldFPSignBitOps(BinaryOperator &I);
439 Instruction *foldFDivConstantDivisor(BinaryOperator &I);
440
441 // Optimize one of these forms:
442 // and i1 Op, SI / select i1 Op, i1 SI, i1 false (if IsAnd = true)
443 // or i1 Op, SI / select i1 Op, i1 true, i1 SI (if IsAnd = false)
444 // into a simpler select instruction using isImpliedCondition.
445 Instruction *foldAndOrOfSelectUsingImpliedCond(Value *Op, SelectInst &SI,
446 bool IsAnd);
447
448 Instruction *hoistFNegAboveFMulFDiv(Value *FNegOp, Instruction &FMFSource);
449
450public:
451 /// Create and insert the idiom we use to indicate a block is unreachable
452 /// without having to rewrite the CFG from within InstCombine.
// NOTE(review): doxygen-scrape artifact — original source line 453 (the
// function header, presumably
// `void CreateNonTerminatorUnreachable(Instruction *InsertAt) {`) is missing
// from this extraction; restore from upstream InstCombineInternal.h.
// The idiom: store `true` through a poison pointer. This is a non-terminator
// instruction, so it can be inserted mid-block without rewriting the CFG.
454 auto &Ctx = InsertAt->getContext();
455 auto *SI = new StoreInst(ConstantInt::getTrue(Ctx),
456 PoisonValue::get(PointerType::getUnqual(Ctx)),
457 /*isVolatile*/ false, Align(1));
458 InsertNewInstBefore(SI, InsertAt->getIterator());
459 }
460
461 /// Combiner aware instruction erasure.
462 ///
463 /// When dealing with an instruction that has side effects or produces a void
464 /// value, we can't rely on DCE to delete the instruction. Instead, visit
465 /// methods should return the value returned by this function.
// NOTE(review): doxygen-scrape artifact — original source lines 466 (the
// function header) and 469 are missing from this extraction; restore from
// upstream InstCombineInternal.h.
467 LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
468 assert(I.use_empty() && "Cannot erase instruction that is used!");
470
471 // Make sure that we reprocess all operands now that we reduced their
472 // use counts.
// Snapshot the operands before erasure — they are unreachable afterwards.
473 SmallVector<Value *> Ops(I.operands());
// Remove the instruction from the worklist and from the DC cache
// (NOTE(review): DC is declared outside this view — confirm its type
// upstream) before physically erasing it from the IR.
474 Worklist.remove(&I);
475 DC.removeValue(&I);
476 I.eraseFromParent();
// Each operand just lost a use; give the worklist a chance to revisit it.
477 for (Value *Op : Ops)
478 Worklist.handleUseCountDecrement(Op);
479 MadeIRChange = true;
480 return nullptr; // Don't do anything with FI
481 }
482
483 OverflowResult computeOverflow(
484 Instruction::BinaryOps BinaryOp, bool IsSigned,
485 Value *LHS, Value *RHS, Instruction *CxtI) const;
486
487 /// Performs a few simplifications for operators which are associative
488 /// or commutative.
489 bool SimplifyAssociativeOrCommutative(BinaryOperator &I);
490
491 /// Tries to simplify binary operations which some other binary
492 /// operation distributes over.
493 ///
494 /// It does this either by factorizing out common terms (eg "(A*B)+(A*C)"
495 /// -> "A*(B+C)") or expanding out if this results in simplifications (eg: "A
496 /// & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the simplified
497 /// value, or null if it didn't simplify.
498 Value *foldUsingDistributiveLaws(BinaryOperator &I);
499
500 /// Tries to simplify add operations using the definition of remainder.
501 ///
502 /// The definition of remainder is X % C = X - (X / C ) * C. The add
503 /// expression X % C0 + (( X / C0 ) % C1) * C0 can be simplified to
504 /// X % (C0 * C1)
505 Value *SimplifyAddWithRemainder(BinaryOperator &I);
506
507 // Binary Op helper for select operations where the expression can be
508 // efficiently reorganized.
509 Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
510 Value *RHS);
511
512 // If `I` has operand `(ctpop (not x))`, fold `I` with `(sub nuw nsw
513 // BitWidth(x), (ctpop x))`.
514 Instruction *tryFoldInstWithCtpopWithNot(Instruction *I);
515
516 // (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
517 // -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
518 // (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
519 // -> (BinOp (logic_shift (BinOp X, Y)), Mask)
520 Instruction *foldBinOpShiftWithShift(BinaryOperator &I);
521
522 /// Tries to simplify binops of select and cast of the select condition.
523 ///
524 /// (Binop (cast C), (select C, T, F))
525 /// -> (select C, C0, C1)
526 Instruction *foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I);
527
528 /// This tries to simplify binary operations by factorizing out common terms
529 /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
530 Value *tryFactorizationFolds(BinaryOperator &I);
531
532 /// Match a select chain which produces one of three values based on whether
533 /// the LHS is less than, equal to, or greater than RHS respectively.
534 /// Return true if we matched a three way compare idiom. The LHS, RHS, Less,
535 /// Equal and Greater values are saved in the matching process and returned to
536 /// the caller.
537 bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
538 ConstantInt *&Less, ConstantInt *&Equal,
539 ConstantInt *&Greater);
540
541 /// Attempts to replace V with a simpler value based on the demanded
542 /// bits.
543 Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, KnownBits &Known,
544 unsigned Depth, Instruction *CxtI);
545 bool SimplifyDemandedBits(Instruction *I, unsigned Op,
546 const APInt &DemandedMask, KnownBits &Known,
547 unsigned Depth = 0) override;
548
549 /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
550 /// bits. It also tries to handle simplifications that can be done based on
551 /// DemandedMask, but without modifying the Instruction.
552 Value *SimplifyMultipleUseDemandedBits(Instruction *I,
553 const APInt &DemandedMask,
554 KnownBits &Known,
555 unsigned Depth, Instruction *CxtI);
556
557 /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
558 /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
559 Value *simplifyShrShlDemandedBits(
560 Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
561 const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);
562
563 /// Tries to simplify operands to an integer instruction based on its
564 /// demanded bits.
565 bool SimplifyDemandedInstructionBits(Instruction &Inst);
566 bool SimplifyDemandedInstructionBits(Instruction &Inst, KnownBits &Known);
567
568 Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
569 APInt &PoisonElts, unsigned Depth = 0,
570 bool AllowMultipleUsers = false) override;
571
572 /// Attempts to replace V with a simpler value based on the demanded
573 /// floating-point classes
574 Value *SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask,
575 KnownFPClass &Known, unsigned Depth,
576 Instruction *CxtI);
577 bool SimplifyDemandedFPClass(Instruction *I, unsigned Op,
578 FPClassTest DemandedMask, KnownFPClass &Known,
579 unsigned Depth = 0);
580
581 /// Canonicalize the position of binops relative to shufflevector.
582 Instruction *foldVectorBinop(BinaryOperator &Inst);
584 Instruction *foldSelectShuffle(ShuffleVectorInst &Shuf);
585
586 /// Given a binary operator, cast instruction, or select which has a PHI node
587 /// as operand #0, see if we can fold the instruction into the PHI (which is
588 /// only possible if all operands to the PHI are constants).
589 Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);
590
591 /// For a binary operator with 2 phi operands, try to hoist the binary
592 /// operation before the phi. This can result in fewer instructions in
593 /// patterns where at least one set of phi operands simplifies.
594 /// Example:
595 /// BB3: binop (phi [X, BB1], [C1, BB2]), (phi [Y, BB1], [C2, BB2])
596 /// -->
597 /// BB1: BO = binop X, Y
598 /// BB3: phi [BO, BB1], [(binop C1, C2), BB2]
599 Instruction *foldBinopWithPhiOperands(BinaryOperator &BO);
600
601 /// Given an instruction with a select as one operand and a constant as the
602 /// other operand, try to fold the binary operator into the select arguments.
603 /// This also works for Cast instructions, which obviously do not have a
604 /// second operand.
605 Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
606 bool FoldWithMultiUse = false);
607
608 /// This is a convenience wrapper function for the above two functions.
609 Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);
610
611 Instruction *foldAddWithConstant(BinaryOperator &Add);
612
613 Instruction *foldSquareSumInt(BinaryOperator &I);
614 Instruction *foldSquareSumFP(BinaryOperator &I);
615
616 /// Try to rotate an operation below a PHI node, using PHI nodes for
617 /// its operands.
618 Instruction *foldPHIArgOpIntoPHI(PHINode &PN);
619 Instruction *foldPHIArgBinOpIntoPHI(PHINode &PN);
620 Instruction *foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN);
621 Instruction *foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN);
622 Instruction *foldPHIArgGEPIntoPHI(PHINode &PN);
623 Instruction *foldPHIArgLoadIntoPHI(PHINode &PN);
624 Instruction *foldPHIArgZextsIntoPHI(PHINode &PN);
625 Instruction *foldPHIArgIntToPtrToPHI(PHINode &PN);
626
627 /// If an integer typed PHI has only one use which is an IntToPtr operation,
628 /// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
629 /// insert a new pointer typed PHI and replace the original one.
630 bool foldIntegerTypedPHI(PHINode &PN);
631
632 /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
633 /// folded operation.
634 void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);
635
636 Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
638 Instruction *foldSelectICmp(ICmpInst::Predicate Pred, SelectInst *SI,
639 Value *RHS, const ICmpInst &I);
640 bool foldAllocaCmp(AllocaInst *Alloca);
641 Instruction *foldCmpLoadFromIndexedGlobal(LoadInst *LI,
643 GlobalVariable *GV, CmpInst &ICI,
644 ConstantInt *AndCst = nullptr);
645 Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
646 Constant *RHSC);
647 Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
649 Instruction *foldICmpWithCastOp(ICmpInst &ICmp);
650 Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp);
651
652 Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
654 Instruction *foldICmpWithConstant(ICmpInst &Cmp);
655 Instruction *foldICmpUsingBoolRange(ICmpInst &I);
656 Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
657 Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
658 Instruction *foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
659 const APInt &C);
660 Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
661 Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax,
662 Value *Z, ICmpInst::Predicate Pred);
663 Instruction *foldICmpEquality(ICmpInst &Cmp);
664 Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
665 Instruction *foldSignBitTest(ICmpInst &I);
666 Instruction *foldICmpWithZero(ICmpInst &Cmp);
667
668 Value *foldMultiplicationOverflowCheck(ICmpInst &Cmp);
669
670 Instruction *foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO,
671 const APInt &C);
672 Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
673 ConstantInt *C);
674 Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
675 const APInt &C);
676 Instruction *foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
677 const SimplifyQuery &Q);
678 Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
679 const APInt &C);
680 Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
681 const APInt &C);
682 Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
683 const APInt &C);
684 Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
685 const APInt &C);
686 Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
687 const APInt &C);
688 Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
689 const APInt &C);
690 Instruction *foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
691 const APInt &C);
692 Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
693 const APInt &C);
694 Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
695 const APInt &C);
696 Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
697 const APInt &C);
698 Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
699 const APInt &C);
700 Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
701 const APInt &C1);
702 Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
703 const APInt &C1, const APInt &C2);
704 Instruction *foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor,
705 const APInt &C);
706 Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
707 const APInt &C2);
708 Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1,
709 const APInt &C2);
710
711 Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
712 BinaryOperator *BO,
713 const APInt &C);
714 Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
715 const APInt &C);
716 Instruction *foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II,
717 const APInt &C);
718 Instruction *foldICmpBitCast(ICmpInst &Cmp);
719 Instruction *foldICmpWithTrunc(ICmpInst &Cmp);
720 Instruction *foldICmpCommutative(ICmpInst::Predicate Pred, Value *Op0,
721 Value *Op1, ICmpInst &CxtI);
722
723 // Helpers of visitSelectInst().
726 Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
727 Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
729 Value *A, Value *B, Instruction &Outer,
734 unsigned Depth = 0);
735
736 Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
737 bool isSigned, bool Inside);
738 bool mergeStoreIntoSuccessor(StoreInst &SI);
739
740 /// Given an initial instruction, check to see if it is the root of a
741 /// bswap/bitreverse idiom. If so, return the equivalent bswap/bitreverse
742 /// intrinsic.
743 Instruction *matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps,
744 bool MatchBitReversals);
745
746 Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
747 Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);
748
749 Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
750
751 bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock);
752 void tryToSinkInstructionDbgValues(
753 Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
755 void tryToSinkInstructionDPValues(Instruction *I,
756 BasicBlock::iterator InsertPos,
757 BasicBlock *SrcBlock, BasicBlock *DestBlock,
759
760 bool removeInstructionsBeforeUnreachable(Instruction &I);
761 void addDeadEdge(BasicBlock *From, BasicBlock *To,
763 void handleUnreachableFrom(Instruction *I,
765 void handlePotentiallyDeadBlocks(SmallVectorImpl<BasicBlock *> &Worklist);
766 void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc);
767 void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser = nullptr);
768};
769
770class Negator final {
771 /// Top-to-bottom, def-to-use negated instruction tree we produced.
773
775 BuilderTy Builder;
776
777 const bool IsTrulyNegation;
778
779 SmallDenseMap<Value *, Value *> NegationsCache;
780
781 Negator(LLVMContext &C, const DataLayout &DL, bool IsTrulyNegation);
782
783#if LLVM_ENABLE_STATS
784 unsigned NumValuesVisitedInThisNegator = 0;
785 ~Negator();
786#endif
787
788 using Result = std::pair<ArrayRef<Instruction *> /*NewInstructions*/,
789 Value * /*NegatedRoot*/>;
790
791 std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I);
792
793 [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW, unsigned Depth);
794
795 [[nodiscard]] Value *negate(Value *V, bool IsNSW, unsigned Depth);
796
797 /// Recurse depth-first and attempt to sink the negation.
798 /// FIXME: use worklist?
799 [[nodiscard]] std::optional<Result> run(Value *Root, bool IsNSW);
800
801 Negator(const Negator &) = delete;
802 Negator(Negator &&) = delete;
803 Negator &operator=(const Negator &) = delete;
804 Negator &operator=(Negator &&) = delete;
805
806public:
807 /// Attempt to negate \p Root. Returns nullptr if negation can't be performed,
808 /// otherwise returns negated value.
809 [[nodiscard]] static Value *Negate(bool LHSIsZero, bool IsNSW, Value *Root,
810 InstCombinerImpl &IC);
811};
812
813} // end namespace llvm
814
815#undef DEBUG_TYPE
816
817#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static bool foldICmpWithDominatingICmp(CmpInst *Cmp, const TargetLowering &TLI)
For pattern like:
#define LLVM_LIBRARY_VISIBILITY
Definition: Compiler.h:131
static bool willNotOverflow(BinaryOpIntrinsic *BO, LazyValueInfo *LVI)
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Align
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
Hexagon Common GEP
IRTranslator LLVM IR MI
static constexpr unsigned NegatorMaxNodesSSO
static constexpr unsigned NegatorDefaultMaxDepth
This file provides the interface for the instcombine pass implementation.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
StandardInstrumentations SI(Mod->getContext(), Debug, VerifyEach)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
static OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const AddOperator *Add, const SimplifyQuery &SQ)
Value * RHS
Value * LHS
BinaryOperator * Mul
support::ulittle16_t & Lo
Definition: aarch32.cpp:206
support::ulittle16_t & Hi
Definition: aarch32.cpp:205
static const uint32_t IV[8]
Definition: blake3_impl.h:78
Class for arbitrary precision integers.
Definition: APInt.h:76
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
This class represents any memset intrinsic.
A cache of @llvm.assume calls within a function.
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:727
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:173
This class represents a no-op cast from one type to another.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1259
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:483
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:770
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:780
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
This instruction extracts a single (scalar) element from a VectorType value.
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
An instruction for ordering other memory operations.
Definition: Instructions.h:445
This class represents a freeze function that returns random concrete value if an operand is either a ...
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:949
This instruction compares its operands according to the predicate given to the constructor.
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field of array element value into an aggregate value.
bool fmulByZeroIsZero(Value *MulVal, FastMathFlags FMF, const Instruction *CtxI) const
Check if fmul MulVal, +0.0 will yield +0.0 (or signed zero is ignorable).
virtual ~InstCombinerImpl()=default
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Instruction * foldVectorSelect(SelectInst &Sel)
Instruction * foldSelectValueEquivalence(SelectInst &SI, ICmpInst &ICI)
Instruction * foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1, Value *A, Value *B, Instruction &Outer, SelectPatternFlavor SPF2, Value *C)
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
bool replaceInInstruction(Value *V, Value *Old, Value *New, unsigned Depth=0)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI)
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Instruction * visitInstruction(Instruction &I)
Specify what to return for unhandled instructions.
KnownFPClass computeKnownFPClass(Value *Val, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Constant * getLosslessSignedTrunc(Constant *C, Type *TruncTy)
InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder, bool MinimizeSize, AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Instruction * visitSelectInst(SelectInst &SI)
Instruction * foldSelectOfBools(SelectInst &SI)
Instruction * foldSelectExtConst(SelectInst &Sel)
The core instruction combiner logic.
Definition: InstCombiner.h:47
Base class for instruction visitors.
Definition: InstVisitor.h:78
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
This class represents a cast from an integer to a pointer.
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:47
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
Definition: Instructions.h:178
This class represents min/max intrinsics.
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:31
The optimization diagnostic interface.
Analysis providing profile information.
This class represents a cast from a pointer to an integer.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:302
Multiway switch.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
This function has undefined behavior.
This represents the llvm.va_end intrinsic.
LLVM Value Representation.
Definition: Value.h:74
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
This class represents zero extension of integer types.
self_iterator getIterator()
Definition: ilist_node.h:109
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
OverflowResult
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
Definition: Utils.cpp:1582
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
SelectPatternFlavor
Specific patterns of select instructions we can match.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.