LLVM 20.0.0git
AggressiveInstCombine.cpp
1//===- AggressiveInstCombine.cpp ------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the aggressive expression pattern combiner classes.
10// Currently, it handles expression patterns for:
11// * Truncate instruction
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
16#include "AggressiveInstCombineInternal.h"
17#include "llvm/ADT/Statistic.h"
18#include "llvm/Analysis/AliasAnalysis.h"
19#include "llvm/Analysis/AssumptionCache.h"
20#include "llvm/Analysis/BasicAliasAnalysis.h"
21#include "llvm/Analysis/ConstantFolding.h"
22#include "llvm/Analysis/DomTreeUpdater.h"
23#include "llvm/Analysis/GlobalsModRef.h"
24#include "llvm/Analysis/TargetLibraryInfo.h"
25#include "llvm/Analysis/TargetTransformInfo.h"
26#include "llvm/Analysis/ValueTracking.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/Function.h"
30#include "llvm/IR/IRBuilder.h"
31#include "llvm/IR/PatternMatch.h"
32#include "llvm/Transforms/Utils/BasicBlockUtils.h"
33#include "llvm/Transforms/Utils/BuildLibCalls.h"
34#include "llvm/Transforms/Utils/Local.h"
35
36using namespace llvm;
37using namespace PatternMatch;
38
39#define DEBUG_TYPE "aggressive-instcombine"
40
41STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
42STATISTIC(NumGuardedRotates,
43 "Number of guarded rotates transformed into funnel shifts");
44STATISTIC(NumGuardedFunnelShifts,
45 "Number of guarded funnel shifts transformed into funnel shifts");
46STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");
47
48static cl::opt<unsigned> MaxInstrsToScan(
49 "aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden,
50 cl::desc("Max number of instructions to scan for aggressive instcombine."));
51
52static cl::opt<unsigned> StrNCmpInlineThreshold(
53 "strncmp-inline-threshold", cl::init(3), cl::Hidden,
54 cl::desc("The maximum length of a constant string for a builtin string cmp "
55 "call eligible for inlining. The default value is 3."));
56
57static cl::opt<unsigned>
58 MemChrInlineThreshold("memchr-inline-threshold", cl::init(3), cl::Hidden,
59 cl::desc("The maximum length of a constant string to "
60 "inline a memchr call."));
61
62/// Match a pattern for a bitwise funnel/rotate operation that partially guards
63/// against undefined behavior by branching around the funnel-shift/rotation
64/// when the shift amount is 0.
65static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
66 if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
67 return false;
68
69 // As with the one-use checks below, this is not strictly necessary, but we
70 // are being cautious to avoid potential perf regressions on targets that
71 // do not actually have a funnel/rotate instruction (where the funnel shift
72 // would be expanded back into math/shift/logic ops).
73 if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
74 return false;
75
76 // Match V to funnel shift left/right and capture the source operands and
77 // shift amount.
78 auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
79 Value *&ShAmt) {
80 unsigned Width = V->getType()->getScalarSizeInBits();
81
82 // fshl(ShVal0, ShVal1, ShAmt)
83 // == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt))
84 if (match(V, m_OneUse(m_c_Or(
85 m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
86 m_LShr(m_Value(ShVal1),
87 m_Sub(m_SpecificInt(Width), m_Deferred(ShAmt))))))) {
88 return Intrinsic::fshl;
89 }
90
91 // fshr(ShVal0, ShVal1, ShAmt)
92 // == (ShVal0 >> ShAmt) | (ShVal1 << (Width - ShAmt))
93 if (match(V,
94 m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
95 m_Value(ShAmt))),
96 m_LShr(m_Value(ShVal1), m_Deferred(ShAmt)))))) {
97 return Intrinsic::fshr;
98 }
99
100 return Intrinsic::not_intrinsic;
101 };
102
103 // One phi operand must be a funnel/rotate operation, and the other phi
104 // operand must be the source value of that funnel/rotate operation:
105 // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
106 // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
107 // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
108 PHINode &Phi = cast<PHINode>(I);
109 unsigned FunnelOp = 0, GuardOp = 1;
110 Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
111 Value *ShVal0, *ShVal1, *ShAmt;
112 Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
113 if (IID == Intrinsic::not_intrinsic ||
114 (IID == Intrinsic::fshl && ShVal0 != P1) ||
115 (IID == Intrinsic::fshr && ShVal1 != P1)) {
116 IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
117 if (IID == Intrinsic::not_intrinsic ||
118 (IID == Intrinsic::fshl && ShVal0 != P0) ||
119 (IID == Intrinsic::fshr && ShVal1 != P0))
120 return false;
121 assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
122 "Pattern must match funnel shift left or right");
123 std::swap(FunnelOp, GuardOp);
124 }
125
126 // The incoming block with our source operand must be the "guard" block.
127 // That must contain a cmp+branch to avoid the funnel/rotate when the shift
128 // amount is equal to 0. The other incoming block is the block with the
129 // funnel/rotate.
130 BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
131 BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
132 Instruction *TermI = GuardBB->getTerminator();
133
134 // Ensure that the shift values dominate each block.
135 if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
136 return false;
137
138 BasicBlock *PhiBB = Phi.getParent();
139 if (!match(TermI, m_Br(m_SpecificICmp(CmpInst::ICMP_EQ, m_Specific(ShAmt),
140 m_ZeroInt()),
141 m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
142 return false;
143
144 IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
145
146 if (ShVal0 == ShVal1)
147 ++NumGuardedRotates;
148 else
149 ++NumGuardedFunnelShifts;
150
151 // If this is not a rotate then the select was blocking poison from the
152 // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
153 bool IsFshl = IID == Intrinsic::fshl;
154 if (ShVal0 != ShVal1) {
155 if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
156 ShVal1 = Builder.CreateFreeze(ShVal1);
157 else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
158 ShVal0 = Builder.CreateFreeze(ShVal0);
159 }
160
161 // We matched a variation of this IR pattern:
162 // GuardBB:
163 // %cmp = icmp eq i32 %ShAmt, 0
164 // br i1 %cmp, label %PhiBB, label %FunnelBB
165 // FunnelBB:
166 // %sub = sub i32 32, %ShAmt
167 // %shr = lshr i32 %ShVal1, %sub
168 // %shl = shl i32 %ShVal0, %ShAmt
169 // %fsh = or i32 %shr, %shl
170 // br label %PhiBB
171 // PhiBB:
172 // %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
173 // -->
174 // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
175 Phi.replaceAllUsesWith(
176 Builder.CreateIntrinsic(IID, Phi.getType(), {ShVal0, ShVal1, ShAmt}));
177 return true;
178}
179
180/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
181/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
182/// of 'and' ops, then we also need to capture the fact that we saw an
183/// "and X, 1", so that's an extra return value for that case.
184struct MaskOps {
185 Value *Root = nullptr;
186 APInt Mask;
187 bool MatchAndChain;
188 bool FoundAnd1 = false;
189
190 MaskOps(unsigned BitWidth, bool MatchAnds)
191 : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {}
192};
193
194/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
195/// chain of 'and' or 'or' instructions looking for shift ops of a common source
196/// value. Examples:
197/// or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
198/// returns { X, 0x129 }
199/// and (and (X >> 1), 1), (X >> 4)
200/// returns { X, 0x12 }
201static bool matchAndOrChain(Value *V, MaskOps &MOps) {
202 Value *Op0, *Op1;
203 if (MOps.MatchAndChain) {
204 // Recurse through a chain of 'and' operands. This requires an extra check
205 // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
206 // in the chain to know that all of the high bits are cleared.
207 if (match(V, m_And(m_Value(Op0), m_One()))) {
208 MOps.FoundAnd1 = true;
209 return matchAndOrChain(Op0, MOps);
210 }
211 if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
212 return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
213 } else {
214 // Recurse through a chain of 'or' operands.
215 if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
216 return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
217 }
218
219 // We need a shift-right or a bare value representing a compare of bit 0 of
220 // the original source operand.
221 Value *Candidate;
222 const APInt *BitIndex = nullptr;
223 if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
224 Candidate = V;
225
226 // Initialize result source operand.
227 if (!MOps.Root)
228 MOps.Root = Candidate;
229
230 // The shift constant is out-of-range? This code hasn't been simplified.
231 if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
232 return false;
233
234 // Fill in the mask bit derived from the shift constant.
235 MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
236 return MOps.Root == Candidate;
237}
238
239/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
240/// These will include a chain of 'or' or 'and'-shifted bits from a
241/// common source value:
242/// and (or (lshr X, C), ...), 1 --> (X & CMask) != 0
243/// and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
244/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
245/// that differ only with a final 'not' of the result. We expect that final
246/// 'not' to be folded with the compare that we create here (invert predicate).
247static bool foldAnyOrAllBitsSet(Instruction &I) {
248 // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
249 // final "and X, 1" instruction must be the final op in the sequence.
250 bool MatchAllBitsSet;
251 if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_One())))
252 MatchAllBitsSet = true;
253 else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
254 MatchAllBitsSet = false;
255 else
256 return false;
257
258 MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
259 if (MatchAllBitsSet) {
260 if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
261 return false;
262 } else {
263 if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
264 return false;
265 }
266
267 // The pattern was found. Create a masked compare that replaces all of the
268 // shift and logic ops.
269 IRBuilder<> Builder(&I);
270 Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
271 Value *And = Builder.CreateAnd(MOps.Root, Mask);
272 Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
273 : Builder.CreateIsNotNull(And);
274 Value *Zext = Builder.CreateZExt(Cmp, I.getType());
275 I.replaceAllUsesWith(Zext);
276 ++NumAnyOrAllBitsSet;
277 return true;
278}
279
280// Try to recognize the function below as a popcount intrinsic.
281// This is the "best" algorithm from
282// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
283// Also used in TargetLowering::expandCTPOP().
284//
285// int popcount(unsigned int i) {
286// i = i - ((i >> 1) & 0x55555555);
287// i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
288// i = ((i + (i >> 4)) & 0x0F0F0F0F);
289// return (i * 0x01010101) >> 24;
290// }
291static bool tryToRecognizePopCount(Instruction &I) {
292 if (I.getOpcode() != Instruction::LShr)
293 return false;
294
295 Type *Ty = I.getType();
296 if (!Ty->isIntOrIntVectorTy())
297 return false;
298
299 unsigned Len = Ty->getScalarSizeInBits();
300 // FIXME: fix Len == 8 and other irregular type lengths.
301 if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
302 return false;
303
304 APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
305 APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
306 APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
307 APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
308 APInt MaskShift = APInt(Len, Len - 8);
309
310 Value *Op0 = I.getOperand(0);
311 Value *Op1 = I.getOperand(1);
312 Value *MulOp0;
313 // Matching "(i * 0x01010101...) >> 24".
314 if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
315 match(Op1, m_SpecificInt(MaskShift))) {
316 Value *ShiftOp0;
317 // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
318 if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
319 m_Deferred(ShiftOp0)),
320 m_SpecificInt(Mask0F)))) {
321 Value *AndOp0;
322 // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
323 if (match(ShiftOp0,
324 m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
325 m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
326 m_SpecificInt(Mask33))))) {
327 Value *Root, *SubOp1;
328 // Matching "i - ((i >> 1) & 0x55555555...)".
329 if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
330 match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
331 m_SpecificInt(Mask55)))) {
332 LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
333 IRBuilder<> Builder(&I);
334 I.replaceAllUsesWith(
335 Builder.CreateIntrinsic(Intrinsic::ctpop, I.getType(), {Root}));
336 ++NumPopCountRecognized;
337 return true;
338 }
339 }
340 }
341 }
342
343 return false;
344}
345
346/// Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), providing C1 and
347/// C2 saturate the value of the fp conversion. The transform is not reversible
348/// as the fptosi.sat is more defined than the input - all values produce a
349/// valid value for the fptosi.sat, whereas some produce poison for original
350/// values that were out of range of the integer conversion. The reversed pattern may
351/// use fmax and fmin instead. As we cannot directly reverse the transform, and
352/// it is not always profitable, we make it conditional on the cost being
353/// reported as lower by TTI.
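// Illustrative IR (editor's sketch, not part of the original source): a clamp
// of an fptosi result to the i8 range can become a saturating conversion plus
// a sign extend, assuming TTI reports the intrinsic as cheaper:
//   %conv = fptosi float %x to i32
//   %lo = call i32 @llvm.smax.i32(i32 %conv, i32 -128)
//   %clamped = call i32 @llvm.smin.i32(i32 %lo, i32 127)
//   -->
//   %sat = call i8 @llvm.fptosi.sat.i8.f32(float %x)
//   %res = sext i8 %sat to i32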
354static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
355 // Look for smin(smax(fptosi(x), C1), C2) patterns, converting to fptosi_sat.
356 Value *In;
357 const APInt *MinC, *MaxC;
358 if (!match(&I, m_SMax(m_OneUse(m_SMin(m_OneUse(m_FPToSI(m_Value(In))),
359 m_APInt(MinC))),
360 m_APInt(MaxC))) &&
361 !match(&I, m_SMin(m_OneUse(m_SMax(m_OneUse(m_FPToSI(m_Value(In))),
362 m_APInt(MaxC))),
363 m_APInt(MinC))))
364 return false;
365
366 // Check that the constants clamp a saturate.
367 if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1)
368 return false;
369
370 Type *IntTy = I.getType();
371 Type *FpTy = In->getType();
372 Type *SatTy =
373 IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1);
374 if (auto *VecTy = dyn_cast<VectorType>(IntTy))
375 SatTy = VectorType::get(SatTy, VecTy->getElementCount());
376
377 // Get the cost of the intrinsic, and check that against the cost of
378 // fptosi+smin+smax
379 InstructionCost SatCost = TTI.getIntrinsicInstrCost(
380 IntrinsicCostAttributes(Intrinsic::fptosi_sat, SatTy, {In}, {FpTy}),
381 TTI::TCK_RecipThroughput);
382 SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy,
383 TTI::CastContextHint::None,
384 TTI::TCK_RecipThroughput);
385
386 InstructionCost MinMaxCost = TTI.getCastInstrCost(
387 Instruction::FPToSI, IntTy, FpTy, TTI::CastContextHint::None,
388 TTI::TCK_RecipThroughput);
389 MinMaxCost += TTI.getIntrinsicInstrCost(
390 IntrinsicCostAttributes(Intrinsic::smin, IntTy, {IntTy}),
391 TTI::TCK_RecipThroughput);
392 MinMaxCost += TTI.getIntrinsicInstrCost(
393 IntrinsicCostAttributes(Intrinsic::smax, IntTy, {IntTy}),
394 TTI::TCK_RecipThroughput);
395
396 if (SatCost >= MinMaxCost)
397 return false;
398
399 IRBuilder<> Builder(&I);
400 Value *Sat =
401 Builder.CreateIntrinsic(Intrinsic::fptosi_sat, {SatTy, FpTy}, In);
402 I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
403 return true;
404}
405
406/// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids
407/// pessimistic codegen that has to account for setting errno and can enable
408/// vectorization.
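// Illustrative IR (editor's sketch, not part of the original source): with a
// no-NaNs guarantee the libcall can become the intrinsic, which never sets
// errno and is friendlier to vectorization:
//   %r = call nnan double @sqrt(double %x)
//   -->
//   %r = call nnan double @llvm.sqrt.f64(double %x)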
409static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
410 TargetLibraryInfo &TLI, AssumptionCache &AC,
411 DominatorTree &DT) {
412 // If (1) this is a sqrt libcall, (2) we can assume that NAN is not created
413 // (because NNAN or the operand arg must not be less than -0.0) and (3) we
414 // would not end up lowering to a libcall anyway (which could change the value
415 // of errno), then:
416 // (1) errno won't be set.
417 // (2) it is safe to convert this to an intrinsic call.
418 Type *Ty = Call->getType();
419 Value *Arg = Call->getArgOperand(0);
420 if (TTI.haveFastSqrt(Ty) &&
421 (Call->hasNoNaNs() ||
422 cannotBeOrderedLessThanZero(
423 Arg, 0,
424 SimplifyQuery(Call->getDataLayout(), &TLI, &DT, &AC, Call)))) {
425 IRBuilder<> Builder(Call);
426 IRBuilderBase::FastMathFlagGuard Guard(Builder);
427 Builder.setFastMathFlags(Call->getFastMathFlags());
428
429 Value *NewSqrt = Builder.CreateIntrinsic(Intrinsic::sqrt, Ty, Arg,
430 /*FMFSource=*/nullptr, "sqrt");
431 Call->replaceAllUsesWith(NewSqrt);
432
433 // Explicitly erase the old call because a call with side effects is not
434 // trivially dead.
435 Call->eraseFromParent();
436 return true;
437 }
438
439 return false;
440}
441
442// Check if this array of constants represents a cttz table.
443// Iterate over the elements from \p Table by trying to find/match all
444// the numbers from 0 to \p InputBits that should represent cttz results.
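// Worked example (editor's note, not part of the original source): for the
// classic 32-bit de Bruijn constant Mul == 0x077CB531 with Shift == 27, the
// check below requires Table[((0x077CB531 << e) mod 2^32) >> 27] == e for
// every e in [0, 32), i.e. exactly InputBits entries must match.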
445static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
446 uint64_t Shift, uint64_t InputBits) {
447 unsigned Length = Table.getNumElements();
448 if (Length < InputBits || Length > InputBits * 2)
449 return false;
450
451 APInt Mask = APInt::getBitsSetFrom(InputBits, Shift);
452 unsigned Matched = 0;
453
454 for (unsigned i = 0; i < Length; i++) {
455 uint64_t Element = Table.getElementAsInteger(i);
456 if (Element >= InputBits)
457 continue;
458
459 // Check if \p Element matches a concrete answer. It could fail for some
460 // elements that are never accessed, so we keep iterating over each element
461 // from the table. The number of matched elements should be equal to the
462 // number of potential right answers which is \p InputBits actually.
463 if ((((Mul << Element) & Mask.getZExtValue()) >> Shift) == i)
464 Matched++;
465 }
466
467 return Matched == InputBits;
468}
469
470// Try to recognize table-based ctz implementation.
471// E.g., an example in C (for more cases please see the llvm/tests):
472// int f(unsigned x) {
473// static const char table[32] =
474// {0, 1, 28, 2, 29, 14, 24, 3, 30,
475// 22, 20, 15, 25, 17, 4, 8, 31, 27,
476// 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
477// return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27];
478// }
479// this can be lowered to `cttz` instruction.
480// There is also a special case when the element is 0.
481//
482// Here are some examples of LLVM IR for a 64-bit target:
483//
484// CASE 1:
485// %sub = sub i32 0, %x
486// %and = and i32 %sub, %x
487// %mul = mul i32 %and, 125613361
488// %shr = lshr i32 %mul, 27
489// %idxprom = zext i32 %shr to i64
490// %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0,
491// i64 %idxprom
492// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
493//
494// CASE 2:
495// %sub = sub i32 0, %x
496// %and = and i32 %sub, %x
497// %mul = mul i32 %and, 72416175
498// %shr = lshr i32 %mul, 26
499// %idxprom = zext i32 %shr to i64
500// %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table,
501// i64 0, i64 %idxprom
502// %0 = load i16, i16* %arrayidx, align 2, !tbaa !8
503//
504// CASE 3:
505// %sub = sub i32 0, %x
506// %and = and i32 %sub, %x
507// %mul = mul i32 %and, 81224991
508// %shr = lshr i32 %mul, 27
509// %idxprom = zext i32 %shr to i64
510// %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table,
511// i64 0, i64 %idxprom
512// %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
513//
514// CASE 4:
515// %sub = sub i64 0, %x
516// %and = and i64 %sub, %x
517// %mul = mul i64 %and, 283881067100198605
518// %shr = lshr i64 %mul, 58
519// %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @table, i64 0,
520// i64 %shr
521// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
522//
523// All this can be lowered to @llvm.cttz.i32/64 intrinsic.
524static bool tryToRecognizeTableBasedCttz(Instruction &I) {
525 LoadInst *LI = dyn_cast<LoadInst>(&I);
526 if (!LI)
527 return false;
528
529 Type *AccessType = LI->getType();
530 if (!AccessType->isIntegerTy())
531 return false;
532
533 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
534 if (!GEP || !GEP->isInBounds() || GEP->getNumIndices() != 2)
535 return false;
536
537 if (!GEP->getSourceElementType()->isArrayTy())
538 return false;
539
540 uint64_t ArraySize = GEP->getSourceElementType()->getArrayNumElements();
541 if (ArraySize != 32 && ArraySize != 64)
542 return false;
543
544 GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
545 if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
546 return false;
547
548 ConstantDataArray *ConstData =
549 dyn_cast<ConstantDataArray>(GVTable->getInitializer());
550 if (!ConstData)
551 return false;
552
553 if (!match(GEP->idx_begin()->get(), m_ZeroInt()))
554 return false;
555
556 Value *Idx2 = std::next(GEP->idx_begin())->get();
557 Value *X1;
558 uint64_t MulConst, ShiftConst;
559 // FIXME: 64-bit targets have `i64` type for the GEP index, so this match will
560 // probably fail for other (e.g. 32-bit) targets.
561 if (!match(Idx2, m_ZExtOrSelf(
562 m_LShr(m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)),
563 m_ConstantInt(MulConst)),
564 m_ConstantInt(ShiftConst)))))
565 return false;
566
567 unsigned InputBits = X1->getType()->getScalarSizeInBits();
568 if (InputBits != 32 && InputBits != 64)
569 return false;
570
571 // Shift should extract top 5..7 bits.
572 if (InputBits - Log2_32(InputBits) != ShiftConst &&
573 InputBits - Log2_32(InputBits) - 1 != ShiftConst)
574 return false;
575
576 if (!isCTTZTable(*ConstData, MulConst, ShiftConst, InputBits))
577 return false;
578
579 auto ZeroTableElem = ConstData->getElementAsInteger(0);
580 bool DefinedForZero = ZeroTableElem == InputBits;
581
582 IRBuilder<> B(LI);
583 ConstantInt *BoolConst = B.getInt1(!DefinedForZero);
584 Type *XType = X1->getType();
585 auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst});
586 Value *ZExtOrTrunc = nullptr;
587
588 if (DefinedForZero) {
589 ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, AccessType);
590 } else {
591 // If the value in elem 0 isn't the same as InputBits, we still want to
592 // produce the value from the table.
593 auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));
594 auto Select =
595 B.CreateSelect(Cmp, ConstantInt::get(XType, ZeroTableElem), Cttz);
596
597 // NOTE: If the table[0] is 0, but the cttz(0) is defined by the Target
598 // it should be handled as: `cttz(x) & (typeSize - 1)`.
599
600 ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);
601 }
602
603 LI->replaceAllUsesWith(ZExtOrTrunc);
604
605 return true;
606}
607
608/// This is used by foldLoadsRecursive() to capture a Root Load node which is
609/// of type or(load, load) and recursively build the wide load. Also capture the
610/// shift amount, zero extend type and loadSize.
611struct LoadOps {
612 LoadInst *Root = nullptr;
613 LoadInst *RootInsert = nullptr;
614 bool FoundRoot = false;
615 uint64_t LoadSize = 0;
616 const APInt *Shift = nullptr;
617 Type *ZextType;
618 AAMDNodes AATags;
619};
620
621// Identify and merge consecutive loads recursively, matching patterns of the form
622// (ZExt(L1) << shift1) | (ZExt(L2) << shift2) -> ZExt(L3) << shift1
623// (ZExt(L1) << shift1) | ZExt(L2) -> ZExt(L3)
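// Illustrative IR (editor's sketch, not part of the original source), on a
// little-endian target that allows the wider access:
//   %b0 = load i8, ptr %p
//   %p1 = getelementptr i8, ptr %p, i64 1
//   %b1 = load i8, ptr %p1
//   %z0 = zext i8 %b0 to i16
//   %z1 = zext i8 %b1 to i16
//   %hi = shl i16 %z1, 8
//   %or = or i16 %z0, %hi
//   -->
//   %or = load i16, ptr %p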
624static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL,
625 AliasAnalysis &AA) {
626 const APInt *ShAmt2 = nullptr;
627 Value *X;
628 Instruction *L1, *L2;
629
630 // Go to the last node with loads.
631 if (match(V, m_OneUse(m_c_Or(
632 m_Value(X),
633 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Instruction(L2)))),
634 m_APInt(ShAmt2)))))) ||
635 match(V, m_OneUse(m_c_Or(m_Value(X),
636 m_OneUse(m_ZExt(m_OneUse(m_Instruction(L2)))))))) {
637 if (!foldLoadsRecursive(X, LOps, DL, AA) && LOps.FoundRoot)
638 // Avoid Partial chain merge.
639 return false;
640 } else
641 return false;
642
643 // Check if the pattern has loads
644 LoadInst *LI1 = LOps.Root;
645 const APInt *ShAmt1 = LOps.Shift;
646 if (LOps.FoundRoot == false &&
647 (match(X, m_OneUse(m_ZExt(m_Instruction(L1)))) ||
648 match(X, m_OneUse(m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Instruction(L1)))),
649 m_APInt(ShAmt1)))))) {
650 LI1 = dyn_cast<LoadInst>(L1);
651 }
652 LoadInst *LI2 = dyn_cast<LoadInst>(L2);
653
654 // Check that the loads are distinct, simple, and in the same address space.
655 if (LI1 == LI2 || !LI1 || !LI2 || !LI1->isSimple() || !LI2->isSimple() ||
656 LI1->getPointerAddressSpace() != LI2->getPointerAddressSpace())
657 return false;
658
659 // Check if Loads come from same BB.
660 if (LI1->getParent() != LI2->getParent())
661 return false;
662
663 // Find the data layout
664 bool IsBigEndian = DL.isBigEndian();
665
666 // Check if loads are consecutive and same size.
667 Value *Load1Ptr = LI1->getPointerOperand();
668 APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
669 Load1Ptr =
670 Load1Ptr->stripAndAccumulateConstantOffsets(DL, Offset1,
671 /* AllowNonInbounds */ true);
672
673 Value *Load2Ptr = LI2->getPointerOperand();
674 APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);
675 Load2Ptr =
676 Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2,
677 /* AllowNonInbounds */ true);
678
679 // Verify if both loads have same base pointers and load sizes are same.
680 uint64_t LoadSize1 = LI1->getType()->getPrimitiveSizeInBits();
681 uint64_t LoadSize2 = LI2->getType()->getPrimitiveSizeInBits();
682 if (Load1Ptr != Load2Ptr || LoadSize1 != LoadSize2)
683 return false;
684
685 // Support load sizes that are at least 8 bits and a power of 2.
686 if (LoadSize1 < 8 || !isPowerOf2_64(LoadSize1))
687 return false;
688
689 // Alias Analysis to check for stores b/w the loads.
690 LoadInst *Start = LOps.FoundRoot ? LOps.RootInsert : LI1, *End = LI2;
691 MemoryLocation Loc;
692 if (!Start->comesBefore(End)) {
693 std::swap(Start, End);
694 Loc = MemoryLocation::get(End);
695 if (LOps.FoundRoot)
696 Loc = Loc.getWithNewSize(LOps.LoadSize);
697 } else
698 Loc = MemoryLocation::get(End);
699 unsigned NumScanned = 0;
700 for (Instruction &Inst :
701 make_range(Start->getIterator(), End->getIterator())) {
702 if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc)))
703 return false;
704
705 // Ignore debug info so that's not counted against MaxInstrsToScan.
706 // Otherwise debug info could affect codegen.
707 if (!isa<DbgInfoIntrinsic>(Inst) && ++NumScanned > MaxInstrsToScan)
708 return false;
709 }
710
711 // Make sure Load with lower Offset is at LI1
712 bool Reverse = false;
713 if (Offset2.slt(Offset1)) {
714 std::swap(LI1, LI2);
715 std::swap(ShAmt1, ShAmt2);
716 std::swap(Offset1, Offset2);
717 std::swap(Load1Ptr, Load2Ptr);
718 std::swap(LoadSize1, LoadSize2);
719 Reverse = true;
720 }
721
722 // Big endian swap the shifts
723 if (IsBigEndian)
724 std::swap(ShAmt1, ShAmt2);
725
726 // Find Shifts values.
727 uint64_t Shift1 = 0, Shift2 = 0;
728 if (ShAmt1)
729 Shift1 = ShAmt1->getZExtValue();
730 if (ShAmt2)
731 Shift2 = ShAmt2->getZExtValue();
732
733 // First load is always LI1. This is where we put the new load.
734 // Use the merged load size available from LI1 for forward loads.
735 if (LOps.FoundRoot) {
736 if (!Reverse)
737 LoadSize1 = LOps.LoadSize;
738 else
739 LoadSize2 = LOps.LoadSize;
740 }
741
742 // Verify that the shift amounts and load offsets line up, i.e. that the loads
743 // are consecutive.
744 uint64_t ShiftDiff = IsBigEndian ? LoadSize2 : LoadSize1;
745 uint64_t PrevSize =
746 DL.getTypeStoreSize(IntegerType::get(LI1->getContext(), LoadSize1));
747 if ((Shift2 - Shift1) != ShiftDiff || (Offset2 - Offset1) != PrevSize)
748 return false;
749
750 // Update LOps
751 AAMDNodes AATags1 = LOps.AATags;
752 AAMDNodes AATags2 = LI2->getAAMetadata();
753 if (LOps.FoundRoot == false) {
754 LOps.FoundRoot = true;
755 AATags1 = LI1->getAAMetadata();
756 }
757 LOps.LoadSize = LoadSize1 + LoadSize2;
758 LOps.RootInsert = Start;
759
760 // Concatenate the AATags of the Merged Loads.
761 LOps.AATags = AATags1.concat(AATags2);
762
763 LOps.Root = LI1;
764 LOps.Shift = ShAmt1;
765 LOps.ZextType = X->getType();
766 return true;
767}
768
769// For a given BB instruction, evaluate all loads in the chain that form a
770// pattern which suggests that the loads can be combined. The one and only use
771// of the loads is to form a wider load.
772static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL,
773 TargetTransformInfo &TTI, AliasAnalysis &AA,
774 const DominatorTree &DT) {
775 // Only consider load chains of scalar values.
776 if (isa<VectorType>(I.getType()))
777 return false;
778
779 LoadOps LOps;
780 if (!foldLoadsRecursive(&I, LOps, DL, AA) || !LOps.FoundRoot)
781 return false;
782
783 IRBuilder<> Builder(&I);
784 LoadInst *NewLoad = nullptr, *LI1 = LOps.Root;
785
786 IntegerType *WiderType = IntegerType::get(I.getContext(), LOps.LoadSize);
787 // TTI based checks if we want to proceed with wider load
788 bool Allowed = TTI.isTypeLegal(WiderType);
789 if (!Allowed)
790 return false;
791
792 unsigned AS = LI1->getPointerAddressSpace();
793 unsigned Fast = 0;
794 Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize,
795 AS, LI1->getAlign(), &Fast);
796 if (!Allowed || !Fast)
797 return false;
798
799 // Get the Index and Ptr for the new GEP.
800 Value *Load1Ptr = LI1->getPointerOperand();
801 Builder.SetInsertPoint(LOps.RootInsert);
802 if (!DT.dominates(Load1Ptr, LOps.RootInsert)) {
803 APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
804 Load1Ptr = Load1Ptr->stripAndAccumulateConstantOffsets(
805 DL, Offset1, /* AllowNonInbounds */ true);
806 Load1Ptr = Builder.CreatePtrAdd(Load1Ptr, Builder.getInt(Offset1));
807 }
808 // Generate wider load.
809 NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(),
810 LI1->isVolatile(), "");
811 NewLoad->takeName(LI1);
812 // Set the New Load AATags Metadata.
813 if (LOps.AATags)
814 NewLoad->setAAMetadata(LOps.AATags);
815
816 Value *NewOp = NewLoad;
817 // Check if zero extend needed.
818 if (LOps.ZextType)
819 NewOp = Builder.CreateZExt(NewOp, LOps.ZextType);
820
821 // Check if shift needed. We need to shift with the amount of load1
822 // shift if not zero.
823 if (LOps.Shift)
824 NewOp = Builder.CreateShl(NewOp, ConstantInt::get(I.getContext(), *LOps.Shift));
825 I.replaceAllUsesWith(NewOp);
826
827 return true;
828}
829
830// Calculate GEP Stride and accumulated const ModOffset. Return Stride and
831// ModOffset
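// Worked example (editor's note, not part of the original source): for a
// pointer computed as
//   getelementptr inbounds [256 x i32], ptr @tbl, i64 0, i64 %i
// followed by a constant byte offset of 6, the only variable scale is 4, so
// Stride = 4 and ModOffset = 6 srem 4 = 2.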
832static std::pair<APInt, APInt>
833getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL) {
834 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
835 std::optional<APInt> Stride;
836 APInt ModOffset(BW, 0);
837 // Return a minimum gep stride, greatest common divisor of consecutive gep
838 // index scales (c.f. Bézout's identity).
839 while (auto *GEP = dyn_cast<GEPOperator>(PtrOp)) {
840 SmallMapVector<Value *, APInt, 4> VarOffsets;
841 if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset))
842 break;
843
844 for (auto [V, Scale] : VarOffsets) {
845 // Only keep a power of two factor for non-inbounds
846 if (!GEP->isInBounds())
847 Scale = APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
848
849 if (!Stride)
850 Stride = Scale;
851 else
852 Stride = APIntOps::GreatestCommonDivisor(*Stride, Scale);
853 }
854
855 PtrOp = GEP->getPointerOperand();
856 }
857
858 // Check whether pointer arrives back at Global Variable via at least one GEP.
859 // Even if it doesn't, we can check by alignment.
860 if (!isa<GlobalVariable>(PtrOp) || !Stride)
861 return {APInt(BW, 1), APInt(BW, 0)};
862
863 // In consideration of signed GEP indices, the offset is normalized to the
864 // non-negative remainder of division by the minimum GEP stride.
865 ModOffset = ModOffset.srem(*Stride);
866 if (ModOffset.isNegative())
867 ModOffset += *Stride;
868
869 return {*Stride, ModOffset};
870}
871
872/// If C is a constant patterned array and all valid loaded results for the
873/// given alignment are equal to the same constant, fold the load to that constant.
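// Illustrative example (editor's sketch, not part of the original source):
//   @g = constant [8 x i8] c"\01\02\01\02\01\02\01\02", align 2
// An align-2 i8 load from @g can only see even offsets, which all hold 1, so
// the load folds to the constant i8 1.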
874static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) {
875 auto *LI = dyn_cast<LoadInst>(&I);
876 if (!LI || LI->isVolatile())
877 return false;
878
879 // We can only fold the load if it is from a constant global with definitive
880 // initializer. Skip expensive logic if this is not the case.
881 auto *PtrOp = LI->getPointerOperand();
882 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
883 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
884 return false;
885
886 // Bail for large initializers in excess of 4K to avoid too many scans.
887 Constant *C = GV->getInitializer();
888 uint64_t GVSize = DL.getTypeAllocSize(C->getType());
889 if (!GVSize || 4096 < GVSize)
890 return false;
891
892 Type *LoadTy = LI->getType();
893 unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
894 auto [Stride, ConstOffset] = getStrideAndModOffsetOfGEP(PtrOp, DL);
895
896 // Any possible offset could be multiple of GEP stride. And any valid
897 // offset is multiple of load alignment, so checking only multiples of bigger
898 // one is sufficient to say results' equality.
899 if (auto LA = LI->getAlign();
900 LA <= GV->getAlign().valueOrOne() && Stride.getZExtValue() < LA.value()) {
901 ConstOffset = APInt(BW, 0);
902 Stride = APInt(BW, LA.value());
903 }
904
905 Constant *Ca = ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL);
906 if (!Ca)
907 return false;
908
909 unsigned E = GVSize - DL.getTypeStoreSize(LoadTy);
910 for (; ConstOffset.getZExtValue() <= E; ConstOffset += Stride)
911 if (Ca != ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL))
912 return false;
913
914 I.replaceAllUsesWith(Ca);
915
916 return true;
917}
918
919namespace {
920class StrNCmpInliner {
921public:
922 StrNCmpInliner(CallInst *CI, LibFunc Func, DomTreeUpdater *DTU,
923 const DataLayout &DL)
924 : CI(CI), Func(Func), DTU(DTU), DL(DL) {}
925
926 bool optimizeStrNCmp();
927
928private:
929 void inlineCompare(Value *LHS, StringRef RHS, uint64_t N, bool Swapped);
930
931 CallInst *CI;
932 LibFunc Func;
933 DomTreeUpdater *DTU;
934 const DataLayout &DL;
935};
936
937} // namespace
938
939/// First we normalize calls to strncmp/strcmp to the form of
940/// compare(s1, s2, N), which means comparing first N bytes of s1 and s2
941/// (without considering '\0').
942///
943/// Examples:
944///
945/// \code
946/// strncmp(s, "a", 3) -> compare(s, "a", 2)
947/// strncmp(s, "abc", 3) -> compare(s, "abc", 3)
948/// strncmp(s, "a\0b", 3) -> compare(s, "a\0b", 2)
949/// strcmp(s, "a") -> compare(s, "a", 2)
950///
951/// char s2[] = {'a'}
952/// strncmp(s, s2, 3) -> compare(s, s2, 3)
953///
954/// char s2[] = {'a', 'b', 'c', 'd'}
955/// strncmp(s, s2, 3) -> compare(s, s2, 3)
956/// \endcode
957///
958/// We only handle cases where N and exactly one of s1 and s2 are constant.
959/// Cases that s1 and s2 are both constant are already handled by the
960/// instcombine pass.
961///
962/// We do not handle cases where N > StrNCmpInlineThreshold.
963///
964/// We also do not handle cases where N < 2, which are already
965/// handled by the instcombine pass.
966///
967bool StrNCmpInliner::optimizeStrNCmp() {
968 if (StrNCmpInlineThreshold < 2)
969 return false;
970
971 if (!isOnlyUsedInZeroComparison(CI))
972 return false;
973
974 Value *Str1P = CI->getArgOperand(0);
975 Value *Str2P = CI->getArgOperand(1);
976 // Should be handled elsewhere.
977 if (Str1P == Str2P)
978 return false;
979
980 StringRef Str1, Str2;
981 bool HasStr1 = getConstantStringInfo(Str1P, Str1, /*TrimAtNul=*/false);
982 bool HasStr2 = getConstantStringInfo(Str2P, Str2, /*TrimAtNul=*/false);
983 if (HasStr1 == HasStr2)
984 return false;
985
986 // Note that '\0' and characters after it are not trimmed.
987 StringRef Str = HasStr1 ? Str1 : Str2;
988 Value *StrP = HasStr1 ? Str2P : Str1P;
989
990 size_t Idx = Str.find('\0');
991 uint64_t N = Idx == StringRef::npos ? Str.size() + 1 : Idx + 1;
992 if (Func == LibFunc_strncmp) {
993 if (auto *ConstInt = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
994 N = std::min(N, ConstInt->getZExtValue());
995 else
996 return false;
997 }
998 // Now N means how many bytes we need to compare at most.
999 if (N > Str.size() || N < 2 || N > StrNCmpInlineThreshold)
1000 return false;
1001
1002 // Cases where StrP has two or more dereferenceable bytes might be better
1003 // optimized elsewhere.
1004 bool CanBeNull = false, CanBeFreed = false;
1005 if (StrP->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed) > 1)
1006 return false;
1007 inlineCompare(StrP, Str, N, HasStr1);
1008 return true;
1009}
1010
1011/// Convert
1012///
1013/// \code
1014/// ret = compare(s1, s2, N)
1015/// \endcode
1016///
1017/// into
1018///
1019/// \code
1020/// ret = (int)s1[0] - (int)s2[0]
1021/// if (ret != 0)
1022/// goto NE
1023/// ...
1024/// ret = (int)s1[N-2] - (int)s2[N-2]
1025/// if (ret != 0)
1026/// goto NE
1027/// ret = (int)s1[N-1] - (int)s2[N-1]
1028/// NE:
1029/// \endcode
1030///
1031/// CFG before and after the transformation:
1032///
1033/// (before)
1034/// BBCI
1035///
1036/// (after)
1037/// BBCI -> BBSubs[0] (sub,icmp) --NE-> BBNE -> BBTail
1038/// | ^
1039/// E |
1040/// | |
1041/// BBSubs[1] (sub,icmp) --NE-----+
1042/// ... |
1043/// BBSubs[N-1] (sub) ---------+
1044///
1045void StrNCmpInliner::inlineCompare(Value *LHS, StringRef RHS, uint64_t N,
1046 bool Swapped) {
1047 auto &Ctx = CI->getContext();
1048 IRBuilder<> B(Ctx);
1049 // We want these instructions to be recognized as inlined instructions for the
1050 // compare call, but we don't have a source location for the definition of
1051 // that function, since we're generating that code now. Because the generated
1052 // code is a viable point for a memory access error, we make the pragmatic
1053 // choice here to directly use CI's location so that we have useful
1054 // attribution for the generated code.
1055 B.SetCurrentDebugLocation(CI->getDebugLoc());
1056
1057 BasicBlock *BBCI = CI->getParent();
1058 BasicBlock *BBTail =
1059 SplitBlock(BBCI, CI, DTU, nullptr, nullptr, BBCI->getName() + ".tail");
1060
1061 SmallVector<BasicBlock *> BBSubs;
1062 for (uint64_t I = 0; I < N; ++I)
1063 BBSubs.push_back(
1064 BasicBlock::Create(Ctx, "sub_" + Twine(I), BBCI->getParent(), BBTail));
1065 BasicBlock *BBNE = BasicBlock::Create(Ctx, "ne", BBCI->getParent(), BBTail);
1066
1067 cast<BranchInst>(BBCI->getTerminator())->setSuccessor(0, BBSubs[0]);
1068
1069 B.SetInsertPoint(BBNE);
1070 PHINode *Phi = B.CreatePHI(CI->getType(), N);
1071 B.CreateBr(BBTail);
1072
1073 Value *Base = LHS;
1074 for (uint64_t i = 0; i < N; ++i) {
1075 B.SetInsertPoint(BBSubs[i]);
1076 Value *VL =
1077 B.CreateZExt(B.CreateLoad(B.getInt8Ty(),
1078 B.CreateInBoundsPtrAdd(Base, B.getInt64(i))),
1079 CI->getType());
1080 Value *VR =
1081 ConstantInt::get(CI->getType(), static_cast<unsigned char>(RHS[i]));
1082 Value *Sub = Swapped ? B.CreateSub(VR, VL) : B.CreateSub(VL, VR);
1083 if (i < N - 1)
1084 B.CreateCondBr(B.CreateICmpNE(Sub, ConstantInt::get(CI->getType(), 0)),
1085 BBNE, BBSubs[i + 1]);
1086 else
1087 B.CreateBr(BBNE);
1088
1089 Phi->addIncoming(Sub, BBSubs[i]);
1090 }
1091
1092 CI->replaceAllUsesWith(Phi);
1093 CI->eraseFromParent();
1094
1095 if (DTU) {
1096 SmallVector<DominatorTree::UpdateType, 8> Updates;
1097 Updates.push_back({DominatorTree::Insert, BBCI, BBSubs[0]});
1098 for (uint64_t i = 0; i < N; ++i) {
1099 if (i < N - 1)
1100 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBSubs[i + 1]});
1101 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBNE});
1102 }
1103 Updates.push_back({DominatorTree::Insert, BBNE, BBTail});
1104 Updates.push_back({DominatorTree::Delete, BBCI, BBTail});
1105 DTU->applyUpdates(Updates);
1106 }
1107}
1108
1109/// Convert memchr with a small constant string into a switch
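/// Illustrative example (editor's sketch, not part of the original source):
/// \code
///   memchr("abc", c, 3)
/// \endcode
/// becomes a switch on the byte value of c with one case per distinct
/// character; each case feeds a phi that selects the index of the first
/// occurrence, and the default result is a null pointer.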
1110static bool foldMemChr(CallInst *Call, DomTreeUpdater *DTU,
1111 const DataLayout &DL) {
1112 if (isa<Constant>(Call->getArgOperand(1)))
1113 return false;
1114
1115 StringRef Str;
1116 Value *Base = Call->getArgOperand(0);
1117 if (!getConstantStringInfo(Base, Str, /*TrimAtNul=*/false))
1118 return false;
1119
1120 uint64_t N = Str.size();
1121 if (auto *ConstInt = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
1122 uint64_t Val = ConstInt->getZExtValue();
1123 // Ignore the case that n is larger than the size of string.
1124 if (Val > N)
1125 return false;
1126 N = Val;
1127 } else
1128 return false;
1129
1130 if (N > MemChrInlineThreshold)
1131 return false;
1132
1133 BasicBlock *BB = Call->getParent();
1134 BasicBlock *BBNext = SplitBlock(BB, Call, DTU);
1135 IRBuilder<> IRB(BB);
1136 IntegerType *ByteTy = IRB.getInt8Ty();
1137 BB->getTerminator()->eraseFromParent();
1138 SwitchInst *SI = IRB.CreateSwitch(
1139 IRB.CreateTrunc(Call->getArgOperand(1), ByteTy), BBNext, N);
1140 Type *IndexTy = DL.getIndexType(Call->getType());
1141 SmallVector<DominatorTree::UpdateType, 8> Updates;
1142
1143 BasicBlock *BBSuccess = BasicBlock::Create(
1144 Call->getContext(), "memchr.success", BB->getParent(), BBNext);
1145 IRB.SetInsertPoint(BBSuccess);
1146 PHINode *IndexPHI = IRB.CreatePHI(IndexTy, N, "memchr.idx");
1147 Value *FirstOccursLocation = IRB.CreateInBoundsPtrAdd(Base, IndexPHI);
1148 IRB.CreateBr(BBNext);
1149 if (DTU)
1150 Updates.push_back({DominatorTree::Insert, BBSuccess, BBNext});
1151
1152 SmallPtrSet<ConstantInt *, 4> Cases;
1153 for (uint64_t I = 0; I < N; ++I) {
1154 ConstantInt *CaseVal = ConstantInt::get(ByteTy, Str[I]);
1155 if (!Cases.insert(CaseVal).second)
1156 continue;
1157
1158 BasicBlock *BBCase = BasicBlock::Create(Call->getContext(), "memchr.case",
1159 BB->getParent(), BBSuccess);
1160 SI->addCase(CaseVal, BBCase);
1161 IRB.SetInsertPoint(BBCase);
1162 IndexPHI->addIncoming(ConstantInt::get(IndexTy, I), BBCase);
1163 IRB.CreateBr(BBSuccess);
1164 if (DTU) {
1165 Updates.push_back({DominatorTree::Insert, BB, BBCase});
1166 Updates.push_back({DominatorTree::Insert, BBCase, BBSuccess});
1167 }
1168 }
1169
1170 PHINode *PHI =
1171 PHINode::Create(Call->getType(), 2, Call->getName(), BBNext->begin());
1172 PHI->addIncoming(Constant::getNullValue(Call->getType()), BB);
1173 PHI->addIncoming(FirstOccursLocation, BBSuccess);
1174
1175 Call->replaceAllUsesWith(PHI);
1176 Call->eraseFromParent();
1177
1178 if (DTU)
1179 DTU->applyUpdates(Updates);
1180
1181 return true;
1182}
1183
1184static bool foldLibCalls(Instruction &I, TargetTransformInfo &TTI,
1185 TargetLibraryInfo &TLI, AssumptionCache &AC,
1186 DominatorTree &DT, const DataLayout &DL,
1187 bool &MadeCFGChange) {
1188
1189 auto *CI = dyn_cast<CallInst>(&I);
1190 if (!CI || CI->isNoBuiltin())
1191 return false;
1192
1193 Function *CalledFunc = CI->getCalledFunction();
1194 if (!CalledFunc)
1195 return false;
1196
1197 LibFunc LF;
1198 if (!TLI.getLibFunc(*CalledFunc, LF) ||
1199 !isLibFuncEmittable(CI->getModule(), &TLI, LF))
1200 return false;
1201
1202 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Lazy);
1203
1204 switch (LF) {
1205 case LibFunc_sqrt:
1206 case LibFunc_sqrtf:
1207 case LibFunc_sqrtl:
1208 return foldSqrt(CI, LF, TTI, TLI, AC, DT);
1209 case LibFunc_strcmp:
1210 case LibFunc_strncmp:
1211 if (StrNCmpInliner(CI, LF, &DTU, DL).optimizeStrNCmp()) {
1212 MadeCFGChange = true;
1213 return true;
1214 }
1215 break;
1216 case LibFunc_memchr:
1217 if (foldMemChr(CI, &DTU, DL)) {
1218 MadeCFGChange = true;
1219 return true;
1220 }
1221 break;
1222 default:;
1223 }
1224 return false;
1225}
1226
1227/// This is the entry point for folds that could be implemented in regular
1228/// InstCombine, but they are separated because they are not expected to
1229/// occur frequently and/or have more than a constant-length pattern match.
1230static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
1231 TargetTransformInfo &TTI,
1232 TargetLibraryInfo &TLI, AliasAnalysis &AA,
1233 AssumptionCache &AC, bool &MadeCFGChange) {
1234 bool MadeChange = false;
1235 for (BasicBlock &BB : F) {
1236 // Ignore unreachable basic blocks.
1237 if (!DT.isReachableFromEntry(&BB))
1238 continue;
1239
1240 const DataLayout &DL = F.getDataLayout();
1241
1242 // Walk the block backwards for efficiency. We're matching a chain of
1243 // use->defs, so we're more likely to succeed by starting from the bottom.
1244 // Also, we want to avoid matching partial patterns.
1245 // TODO: It would be more efficient if we removed dead instructions
1246 // iteratively in this loop rather than waiting until the end.
1247 for (Instruction &I : make_early_inc_range(llvm::reverse(BB))) {
1248 MadeChange |= foldAnyOrAllBitsSet(I);
1249 MadeChange |= foldGuardedFunnelShift(I, DT);
1250 MadeChange |= tryToRecognizePopCount(I);
1251 MadeChange |= tryToFPToSat(I, TTI);
1252 MadeChange |= tryToRecognizeTableBasedCttz(I);
1253 MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT);
1254 MadeChange |= foldPatternedLoads(I, DL);
1255 // NOTE: This function introduces erasing of the instruction `I`, so it
1256 // needs to be called at the end of this sequence, otherwise we may
1257 // introduce bugs.
1258 MadeChange |= foldLibCalls(I, TTI, TLI, AC, DT, DL, MadeCFGChange);
1259 }
1260 }
1261
1262 // We're done with transforms, so remove dead instructions.
1263 if (MadeChange)
1264 for (BasicBlock &BB : F)
1265 SimplifyInstructionsInBlock(&BB);
1266
1267 return MadeChange;
1268}
1269
1270/// This is the entry point for all transforms. Pass manager differences are
1271/// handled in the callers of this function.
1272static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI,
1273 TargetLibraryInfo &TLI, DominatorTree &DT,
1274 AliasAnalysis &AA, bool &MadeCFGChange) {
1275 bool MadeChange = false;
1276 const DataLayout &DL = F.getDataLayout();
1277 TruncInstCombine TIC(AC, TLI, DL, DT);
1278 MadeChange |= TIC.run(F);
1279 MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI, AA, AC, MadeCFGChange);
1280 return MadeChange;
1281}
1282
1283PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
1284 FunctionAnalysisManager &AM) {
1285 auto &AC = AM.getResult<AssumptionAnalysis>(F);
1286 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1287 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1288 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1289 auto &AA = AM.getResult<AAManager>(F);
1290 bool MadeCFGChange = false;
1291 if (!runImpl(F, AC, TTI, TLI, DT, AA, MadeCFGChange)) {
1292 // No changes, all analyses are preserved.
1293 return PreservedAnalyses::all();
1294 }
1295 // Mark all the analyses that instcombine updates as preserved.
1296 PreservedAnalyses PA;
1297 if (MadeCFGChange)
1298 PA.preserve<DominatorTreeAnalysis>();
1299 else
1300 PA.preserveSet<CFGAnalyses>();
1301 return PA;
1302}
AMDGPU Register Bank Select
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool tryToRecognizePopCount(Instruction &I)
static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT)
Try to replace a mathlib call to sqrt with the LLVM intrinsic.
static bool foldAnyOrAllBitsSet(Instruction &I)
Match patterns that correspond to "any-bits-set" and "all-bits-set".
static cl::opt< unsigned > MemChrInlineThreshold("memchr-inline-threshold", cl::init(3), cl::Hidden, cl::desc("The maximum length of a constant string to " "inline a memchr call."))
static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI)
Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), providing C1 and C2 saturate the value of t...
static cl::opt< unsigned > StrNCmpInlineThreshold("strncmp-inline-threshold", cl::init(3), cl::Hidden, cl::desc("The maximum length of a constant string for a builtin string cmp " "call eligible for inlining. The default value is 3."))
static bool matchAndOrChain(Value *V, MaskOps &MOps)
This is a recursive helper for foldAnyOrAllBitsSet() that walks through a chain of 'and' or 'or' inst...
static bool foldMemChr(CallInst *Call, DomTreeUpdater *DTU, const DataLayout &DL)
Convert memchr with a small constant string into a switch.
static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL, TargetTransformInfo &TTI, AliasAnalysis &AA, const DominatorTree &DT)
static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, DominatorTree &DT, AliasAnalysis &AA, bool &MadeCFGChange)
This is the entry point for all transforms.
static bool tryToRecognizeTableBasedCttz(Instruction &I)
static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT)
Match a pattern for a bitwise funnel/rotate operation that partially guards against undefined behavio...
static cl::opt< unsigned > MaxInstrsToScan("aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden, cl::desc("Max number of instructions to scan for aggressive instcombine."))
static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL, AliasAnalysis &AA)
static std::pair< APInt, APInt > getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL)
static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul, uint64_t Shift, uint64_t InputBits)
static bool foldPatternedLoads(Instruction &I, const DataLayout &DL)
If C is a constant patterned array and all valid loaded results for given alignment are same to a con...
static bool foldLibCalls(Instruction &I, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, const DataLayout &DL, bool &MadeCFGChange)
static bool foldUnusualPatterns(Function &F, DominatorTree &DT, TargetTransformInfo &TTI, TargetLibraryInfo &TLI, AliasAnalysis &AA, AssumptionCache &AC, bool &MadeCFGChange)
This is the entry point for folds that could be implemented in regular InstCombine,...
AggressiveInstCombiner - Combine expression patterns to form expressions with fewer,...
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(...)
Definition: Debug.h:106
bool End
Definition: ELF_riscv.cpp:480
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool runImpl(Function &F, const TargetLowering &TLI)
This is the interface for a simple mod/ref and alias analysis over globals.
Hexagon Common GEP
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:500
static Instruction * matchFunnelShift(Instruction &Or, InstCombinerImpl &IC)
Match UB-safe variants of the funnel shift intrinsic.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static const MCExpr * MaskShift(const MCExpr *Val, uint32_t Mask, uint32_t Shift, MCContext &Ctx)
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:166
This pass exposes codegen information to IR-level passes.
Value * LHS
BinaryOperator * Mul
A manager for alias analyses.
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Check whether or not an instruction may read or write the optionally specified memory location.
Class for arbitrary precision integers.
Definition: APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1520
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1330
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1468
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:329
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
Definition: APInt.cpp:624
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1710
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1130
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition: APInt.h:286
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:239
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1221
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:410
A function analysis which provides an AssumptionCache.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:416
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:72
This class represents a function call, abstracting a target machine's calling convention.
@ ICMP_EQ
equal
Definition: InstrTypes.h:694
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:696
uint64_t getElementAsInteger(unsigned i) const
If this is a sequential container of integers (of any size), return the specified element in the low ...
Definition: Constants.cpp:3114
unsigned getNumElements() const
Return the number of elements in the array or vector.
Definition: Constants.cpp:2857
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
This is an important base class in LLVM.
Definition: Constant.h:42
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:373
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
void applyUpdates(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool hasInitializer() const
Definitions have initializers, declarations don't.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition: IRBuilder.h:1830
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition: IRBuilder.cpp:890
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2060
Value * CreateFreeze(Value *V, const Twine &Name="")
Definition: IRBuilder.h:2566
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition: IRBuilder.h:2002
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition: IRBuilder.h:308
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition: IRBuilder.h:2429
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
Definition: IRBuilder.h:1167
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2273
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition: IRBuilder.h:1439
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2048
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:1498
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition: IRBuilder.h:2580
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition: IRBuilder.h:2034
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Definition: IRBuilder.h:1138
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition: IRBuilder.h:177
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Definition: IRBuilder.h:2007
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition: IRBuilder.h:513
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Definition: IRBuilder.h:499
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2697
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
Definition: Metadata.cpp:1764
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
Definition: Metadata.cpp:1750
Class to represent integer types.
Definition: DerivedTypes.h:42
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
An instruction for reading from memory.
Definition: Instructions.h:176
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:261
Value * getPointerOperand()
Definition: Instructions.h:255
bool isSimple() const
Definition: Instructions.h:247
Representation for a specific memory location.
MemoryLocation getWithNewSize(LocationSize NewSize) const
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:146
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
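The PreservedAnalyses entries above follow the standard new-pass-manager idiom: report everything preserved when nothing changed, otherwise preserve the analysis sets that a local rewrite keeps intact. A generic sketch of that return pattern (the use of CFGAnalyses is a common choice for CFG-preserving rewrites, not necessarily this pass's exact preservation set):

#include "llvm/IR/PassManager.h"
using namespace llvm;

static PreservedAnalyses reportResult(bool MadeChange) {
  if (!MadeChange)
    return PreservedAnalyses::all(); // nothing touched, keep everything
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();     // local rewrites that keep the CFG shape
  return PA;
}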
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
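SmallPtrSet::insert returns an (iterator, inserted) pair, which makes it the usual guard in worklist-style traversals: each node is processed at most once. A minimal sketch of that idiom (an illustrative operand walk, not the exact traversal used by this file):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Walk the instruction operand tree rooted at 'Root' once, skipping repeats.
static void visitOnce(Instruction *Root) {
  SmallVector<Instruction *, 16> Worklist{Root};
  SmallPtrSet<Instruction *, 16> Visited;
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue; // already seen
    for (Value *Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}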
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
static constexpr size_t npos
Definition: StringRef.h:53
Multiway switch.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
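getLibFunc maps a callee name onto the LibFunc enum; a fold that rewrites library calls typically pairs it with isLibFuncEmittable (listed further below) before committing. A hedged sketch of that gate, with the call-site handling simplified for illustration:

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

// Return true if 'CI' is a recognized, emittable memchr call.
// Illustrative check only; a real fold would also inspect the arguments.
static bool isRecognizedMemChr(CallInst *CI, const TargetLibraryInfo &TLI) {
  Function *Callee = CI->getCalledFunction();
  if (!Callee)
    return false;
  LibFunc LF;
  if (!TLI.getLibFunc(Callee->getName(), LF) || LF != LibFunc_memchr)
    return false;
  return isLibFuncEmittable(CI->getModule(), &TLI, LF);
}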
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
@ TCK_RecipThroughput
Reciprocal throughput.
bool isTypeLegal(Type *Ty) const
Return true if this type is legal.
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const
Determine if the target supports unaligned memory accesses.
bool haveFastSqrt(Type *Ty) const
Return true if the hardware has a fast square-root instruction.
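The TargetTransformInfo queries above serve as legality and profitability gates before a fold widens memory accesses or emits an intrinsic. A sketch of how such a gate might look, with the width, address space, and "must also be fast" policy as illustrative assumptions:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Decide whether a 'WideBits'-wide, possibly misaligned load is acceptable
// on this target (illustrative gate, not the exact test used by this file).
static bool wideLoadIsCheap(const TargetTransformInfo &TTI, LLVMContext &Ctx,
                            unsigned WideBits, unsigned AddrSpace,
                            Align Alignment) {
  Type *WideTy = Type::getIntNTy(Ctx, WideBits);
  if (!TTI.isTypeLegal(WideTy))
    return false;
  unsigned FastUnaligned = 0;
  if (!TTI.allowsMisalignedMemoryAccesses(Ctx, WideBits, AddrSpace, Alignment,
                                          &FastUnaligned))
    return false;
  return FastUnaligned != 0; // require the unaligned access to also be fast
}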
@ None
The cast is not used with a load/store of any kind.
bool run(Function &F)
Perform TruncInst pattern optimization on the given function.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:243
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:852
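stripAndAccumulateConstantOffsets and getPointerDereferenceableBytes are the usual pair for reasoning about a pointer relative to its base object. A hedged sketch that computes a pointer's constant byte offset and checks the base is known dereferenceable that far (bounds handling deliberately simplified for illustration):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Return true if 'Ptr' is a non-negative constant offset into a base object
// that is known dereferenceable for 'Bytes' bytes past that offset.
static bool offsetIsDereferenceable(const Value *Ptr, uint64_t Bytes,
                                    const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
      DL, Offset, /*AllowNonInbounds=*/true);
  if (Offset.isNegative())
    return false;
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  if (DerefBytes == 0 || CanBeNull || CanBeFreed)
    return false;
  return Offset.getZExtValue() + Bytes <= DerefBytes;
}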
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
const ParentTy * getParent() const
Definition: ilist_node.h:32
#define UINT64_MAX
Definition: DataTypes.h:77
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
Definition: APInt.cpp:771
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
Definition: PatternMatch.h:982
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:826
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:885
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:592
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:903
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:599
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
specific_bbval m_SpecificBB(BasicBlock *BB)
Match a specific basic block value.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches an Add with LHS and RHS in either order.
CastInst_match< OpTy, FPToSIInst > m_FPToSI(const OpTy &Op)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
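The PatternMatch helpers above compose into declarative IR matchers: m_Value captures operands, m_Specific and m_Deferred force reuse of an already-captured value, and m_OneUse restricts a match to single-use subexpressions. A small illustrative matcher (the pattern itself is made up for the example and is not one folded by this file):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

// Match "(X << C) | X" with either or-operand order and a single-use shift,
// capturing X and the constant shift amount.
static bool matchShlOrSelf(Value *V, Value *&X, const APInt *&ShAmtC) {
  return match(V, m_c_Or(m_OneUse(m_Shl(m_Value(X), m_APInt(ShAmtC))),
                         m_Deferred(X)));
}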
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
NodeAddr< PhiNode * > Phi
Definition: RDFGraph.h:390
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Length
Definition: DWP.cpp:480
bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
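getConstantStringInfo is how a string-call fold obtains the constant bytes it compares against; such a fold normally bails out when the string is not a compile-time constant or exceeds a small length threshold. A hedged sketch of that gate (the MaxLen parameter stands in for whatever threshold a caller uses and is purely illustrative):

#include <optional>
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Return the constant string 'V' points to if it is short enough to expand
// inline, otherwise std::nullopt (illustrative gate).
static std::optional<StringRef> getShortConstantString(const Value *V,
                                                       unsigned MaxLen) {
  StringRef Str;
  if (!getConstantStringInfo(V, Str, /*TrimAtNul=*/true))
    return std::nullopt;
  if (Str.size() > MaxLen)
    return std::nullopt;
  return Str;
}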
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:657
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI=nullptr)
Scan the specified basic block and try to simplify any instructions in it and recursively delete dead...
Definition: Local.cpp:737
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition: MathExtras.h:296
bool isLibFuncEmittable(const Module *M, const TargetLibraryInfo *TLI, LibFunc TheLibFunc)
Check whether the library function is available on target and also that it in the current Module is a...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:340
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:420
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
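isPowerOf2_32 and Log2_32 commonly appear together when a fold only fires for power-of-two bit widths and then needs the exact log for shift-amount arithmetic. A one-line sketch of that pairing:

#include "llvm/Support/MathExtras.h"
using namespace llvm;

// Return the exact log2 of 'Width' if it is a power of two, otherwise -1.
static int log2IfPow2(uint32_t Width) {
  return isPowerOf2_32(Width) ? int(Log2_32(Width)) : -1;
}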
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
@ And
Bitwise or logical AND of integers.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="", bool Before=false)
Split the specified block at the specified instruction.
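SplitBlock is what a fold uses when it needs to introduce new control flow around an instruction while keeping the dominator tree up to date. A minimal sketch that splits a block right before a call so a guard branch could later be inserted in the old block (the guard itself is omitted; names are illustrative):

#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

// Split the block containing 'CI' so everything from 'CI' onward lives in a
// new successor block named "tail".
static BasicBlock *splitAtCall(CallInst *CI, DominatorTree &DT) {
  BasicBlock *Head = CI->getParent();
  return SplitBlock(Head, CI->getIterator(), &DT, /*LI=*/nullptr,
                    /*MSSAU=*/nullptr, "tail");
}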
bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is either NaN or never less than -0.0.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This is used by foldLoadsRecursive() to capture a Root Load node which is of type or(load,...
const APInt * Shift
LoadInst * RootInsert
This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and the bit indexes (Mask) nee...
MaskOps(unsigned BitWidth, bool MatchAnds)
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:760
AAMDNodes concat(const AAMDNodes &Other) const
Determine the best AAMDNodes after concatenating two different locations together.
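When two memory accesses are merged into one, the replacement should carry AA metadata that is conservatively correct for both locations; concat computes exactly that combined result. A short sketch of copying it onto a newly created load (names assumed for illustration):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

// Give 'Merged' AA metadata that is valid for both original loads.
static void mergeAAMetadata(LoadInst *Merged, const LoadInst *A,
                            const LoadInst *B) {
  AAMDNodes Combined = A->getAAMetadata().concat(B->getAAMetadata());
  Merged->setAAMetadata(Combined);
}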
A MapVector that performs no allocations if smaller than a certain size.
Definition: MapVector.h:254