//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace llvm {
extern cl::opt<bool> ProfcheckDisableMetadataFixes;
}

STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
STATISTIC(NumGuardedRotates,
          "Number of guarded rotates transformed into funnel shifts");
STATISTIC(NumGuardedFunnelShifts,
          "Number of guarded funnel shifts transformed into funnel shifts");
STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");
STATISTIC(NumSelectCTTZFolded,
          "Number of select-based split cttz patterns folded");
STATISTIC(NumSelectCTLZFolded,
          "Number of select-based split ctlz patterns folded");

static cl::opt<unsigned> MaxInstrsToScan(
    "aggressive-instcombine-max-scan-instrs", cl::init(64), cl::Hidden,
    cl::desc("Max number of instructions to scan for aggressive instcombine."));

static cl::opt<unsigned> StrNCmpInlineThreshold(
    "strncmp-inline-threshold", cl::init(3), cl::Hidden,
    cl::desc("The maximum length of a constant string for a builtin string cmp "
             "call eligible for inlining. The default value is 3."));

static cl::opt<unsigned>
    MemChrInlineThreshold("memchr-inline-threshold", cl::init(3), cl::Hidden,
                          cl::desc("The maximum length of a constant string to "
                                   "inline a memchr call."));

/// Try to fold a select-based split cttz pattern into a single full-width
/// cttz.
///
///   %lo = trunc iN %val to i(N/2)
///   %cmp = icmp eq i(N/2) %lo, 0
///   %shr = lshr iN %val, N/2
///   %hi = trunc iN %shr to i(N/2)
///   %cttz_hi = call i(N/2) @llvm.cttz.i(N/2)(i(N/2) %hi, ...)
///   %hi_plus = add/or_disjoint i(N/2) %cttz_hi, N/2
///   %cttz_lo = call i(N/2) @llvm.cttz.i(N/2)(i(N/2) %lo, ...)
///   %result = select i1 %cmp, i(N/2) %hi_plus, i(N/2) %cttz_lo
/// -->
///   %cttz_wide = call iN @llvm.cttz.iN(iN %val, i1 false)
///   %result = trunc iN %cttz_wide to i(N/2)
/// Alive proof (for i64/i32): https://alive2.llvm.org/ce/z/-s14-s
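///
/// Concrete i64/i32 instance (an illustrative sketch, not from the original
/// comment):
///   %lo = trunc i64 %val to i32
///   %cmp = icmp eq i32 %lo, 0
///   %shr = lshr i64 %val, 32
///   %hi = trunc i64 %shr to i32
///   %cttz_hi = call i32 @llvm.cttz.i32(i32 %hi, i1 true)
///   %hi_plus = add i32 %cttz_hi, 32
///   %cttz_lo = call i32 @llvm.cttz.i32(i32 %lo, i1 true)
///   %result = select i1 %cmp, i32 %hi_plus, i32 %cttz_lo
/// folds to:
///   %cttz_wide = call i64 @llvm.cttz.i64(i64 %val, i1 false)
///   %result = trunc i64 %cttz_wide to i32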
static bool foldSelectSplitCTTZ(Instruction &I) {
  Value *Cond, *TrueVal, *FalseVal;
  if (!match(&I, m_Select(m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal))))
    return false;

  Type *HalfTy = I.getType();
  if (!HalfTy->isIntegerTy())
    return false;
  unsigned HalfWidth = HalfTy->getIntegerBitWidth();

  // Bail out on very small types (i1, i2): the full-width cttz can return
  // values not representable in the half type (e.g., cttz.i4 can return 4,
  // which doesn't fit in i2).
  if (HalfWidth <= 2)
    return false;

  unsigned FullWidth = HalfWidth * 2;

  // select (icmp eq (trunc SrcVal to i(N/2)), 0), HiResult, LoResult
  // Or select (icmp ne ...), LoResult, HiResult
  Value *LoTrunc;
  Value *HiResult, *LoResult;
  if (match(Cond,
            m_SpecificICmp(CmpInst::ICMP_EQ, m_Value(LoTrunc), m_ZeroInt()))) {
    HiResult = TrueVal;
    LoResult = FalseVal;
  } else if (match(Cond, m_SpecificICmp(CmpInst::ICMP_NE, m_Value(LoTrunc),
                                        m_ZeroInt()))) {
    HiResult = FalseVal;
    LoResult = TrueVal;
  } else {
    return false;
  }

  // LoTrunc: trunc iN SrcVal to i(N/2)
  Value *SrcVal;
  if (!match(LoTrunc, m_Trunc(m_Value(SrcVal))))
    return false;
  if (!SrcVal->getType()->isIntegerTy(FullWidth))
    return false;

  // LoResult: cttz(trunc(SrcVal), _), must use same truncated value
  if (!match(LoResult, m_OneUse(m_Intrinsic<Intrinsic::cttz>(
                           m_Specific(LoTrunc), m_Value()))))
    return false;

  // HiResult: add/or_disjoint(cttz(trunc(lshr(SrcVal, N/2)), _), N/2)
  Value *CttzHiCall;
  if (!match(HiResult, m_OneUse(m_AddLike(m_Value(CttzHiCall),
                                          m_SpecificInt(HalfWidth)))))
    return false;

  Value *HiCttzArg;
  if (!match(CttzHiCall, m_OneUse(m_Intrinsic<Intrinsic::cttz>(
                             m_Value(HiCttzArg), m_Value()))))
    return false;

  if (!match(HiCttzArg,
             m_Trunc(m_LShr(m_Specific(SrcVal), m_SpecificInt(HalfWidth)))))
    return false;

  // Match successful.
  IRBuilder<> Builder(&I);
  Value *CttzWide = Builder.CreateIntrinsic(
      Intrinsic::cttz, {SrcVal->getType()}, {SrcVal, Builder.getFalse()});
  Value *Trunc = Builder.CreateTrunc(CttzWide, HalfTy);

  I.replaceAllUsesWith(Trunc);
  ++NumSelectCTTZFolded;
  return true;
}

/// Same as foldSelectSplitCTTZ but for leading zeros (ctlz).
///
///   %shr = lshr iN %val, N/2
///   %hi = trunc iN %shr to i(N/2)
///   %cmp = icmp eq i(N/2) %hi, 0 (or icmp eq iN %shr, 0)
///   %lo = trunc iN %val to i(N/2)
///   %ctlz_lo = call i(N/2) @llvm.ctlz.i(N/2)(i(N/2) %lo, ...)
///   %lo_plus = add/or_disjoint i(N/2) %ctlz_lo, N/2
///   %ctlz_hi = call i(N/2) @llvm.ctlz.i(N/2)(i(N/2) %hi, ...)
///   %result = select i1 %cmp, i(N/2) %lo_plus, i(N/2) %ctlz_hi
/// -->
///   %ctlz_wide = call iN @llvm.ctlz.iN(iN %val, i1 false)
///   %result = trunc iN %ctlz_wide to i(N/2)
///
/// Alive proof (for i64/i32): https://alive2.llvm.org/ce/z/WfQepH
static bool foldSelectSplitCTLZ(Instruction &I) {
  Value *Cond, *TrueVal, *FalseVal;
  if (!match(&I, m_Select(m_Value(Cond), m_Value(TrueVal), m_Value(FalseVal))))
    return false;

  Type *HalfTy = I.getType();
  if (!HalfTy->isIntegerTy())
    return false;
  unsigned HalfWidth = HalfTy->getIntegerBitWidth();

  // Bail out on very small types (i1, i2): the full-width ctlz can return
  // values not representable in the half type (e.g., ctlz.i4 can return 4,
  // which doesn't fit in i2).
  if (HalfWidth <= 2)
    return false;

  unsigned FullWidth = HalfWidth * 2;

  // select (icmp eq HiPart, 0), LoResult, HiResult
  // HiPart could be (trunc (lshr SrcVal, N/2) to i(N/2)) or (lshr SrcVal, N/2)
  Value *HiPart;
  Value *LoResult, *HiResult;
  if (match(Cond,
            m_SpecificICmp(CmpInst::ICMP_EQ, m_Value(HiPart), m_ZeroInt()))) {
    LoResult = TrueVal;  // upper is zero: count in lower + N/2
    HiResult = FalseVal; // upper non-zero: count in upper
  } else if (match(Cond, m_SpecificICmp(CmpInst::ICMP_NE, m_Value(HiPart),
                                        m_ZeroInt()))) {
    LoResult = FalseVal;
    HiResult = TrueVal;
  } else {
    return false;
  }

  // Extract SrcVal from HiPart: either trunc(lshr(SrcVal, N/2)) or
  // lshr(SrcVal, N/2)
  Value *SrcVal;
  if (match(HiPart,
            m_Trunc(m_LShr(m_Value(SrcVal), m_SpecificInt(HalfWidth))))) {
    // HiPart is trunc(lshr(SrcVal, N/2))
  } else if (match(HiPart, m_LShr(m_Value(SrcVal), m_SpecificInt(HalfWidth)))) {
    // HiPart is lshr(SrcVal, N/2)
  } else {
    return false;
  }
  if (!SrcVal->getType()->isIntegerTy(FullWidth))
    return false;

  // HiResult: ctlz(trunc(lshr(SrcVal, N/2)), _)
  Value *HiCtlzArg;
  if (!match(HiResult, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_Value(HiCtlzArg),
                                                             m_Value()))))
    return false;

  if (!match(HiCtlzArg,
             m_Trunc(m_LShr(m_Specific(SrcVal), m_SpecificInt(HalfWidth)))))
    return false;

  // LoResult: add/or_disjoint(ctlz(trunc(SrcVal), _), N/2)
  Value *CtlzLoCall;
  if (!match(LoResult, m_OneUse(m_AddLike(m_Value(CtlzLoCall),
                                          m_SpecificInt(HalfWidth)))))
    return false;

  Value *LoCtlzArg;
  if (!match(CtlzLoCall, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(
                             m_Value(LoCtlzArg), m_Value()))))
    return false;

  if (!match(LoCtlzArg, m_Trunc(m_Specific(SrcVal))))
    return false;

  // Match successful.
  IRBuilder<> Builder(&I);
  Value *CtlzWide = Builder.CreateIntrinsic(
      Intrinsic::ctlz, {SrcVal->getType()}, {SrcVal, Builder.getFalse()});
  Value *Trunc = Builder.CreateTrunc(CtlzWide, HalfTy);

  I.replaceAllUsesWith(Trunc);
  ++NumSelectCTLZFolded;
  return true;
}

/// Match a pattern for a bitwise funnel/rotate operation that partially guards
/// against undefined behavior by branching around the funnel-shift/rotation
/// when the shift amount is 0.
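///
/// A C idiom that typically produces this shape (an illustrative sketch,
/// assuming 32-bit unsigned):
///   unsigned rotl(unsigned x, unsigned n) {
///     return n == 0 ? x : (x << n) | (x >> (32 - n));
///   }
/// The guard branch avoids the undefined shift by 32; the whole diamond
/// collapses into a single @llvm.fshl call.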
static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a funnel/rotate instruction (where the funnel shift
  // would be expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operands and
  // shift amount.
  auto matchFunnelShift = [](Value *V, Value *&ShVal0, Value *&ShVal1,
                             Value *&ShAmt) {
    unsigned Width = V->getType()->getScalarSizeInBits();

    // fshl(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << ShAmt) | (ShVal1 >> (Width - ShAmt))
    if (match(V, m_OneUse(m_c_Or(
                     m_Shl(m_Value(ShVal0), m_Value(ShAmt)),
                     m_LShr(m_Value(ShVal1), m_Sub(m_SpecificInt(Width),
                                                   m_Deferred(ShAmt))))))) {
      return Intrinsic::fshl;
    }

    // fshr(ShVal0, ShVal1, ShAmt)
    //  == (ShVal0 << (Width - ShAmt)) | (ShVal1 >> ShAmt)
    if (match(V,
              m_OneUse(m_c_Or(m_Shl(m_Value(ShVal0), m_Sub(m_SpecificInt(Width),
                                                           m_Value(ShAmt))),
                              m_LShr(m_Value(ShVal1), m_Deferred(ShAmt)))))) {
      return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a funnel/rotate operation, and the other phi
  // operand must be the source value of that funnel/rotate operation:
  // phi [ rotate(RotSrc, ShAmt), FunnelBB ], [ RotSrc, GuardBB ]
  // phi [ fshl(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal0, GuardBB ]
  // phi [ fshr(ShVal0, ShVal1, ShAmt), FunnelBB ], [ ShVal1, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  unsigned FunnelOp = 0, GuardOp = 1;
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *ShVal0, *ShVal1, *ShAmt;
  Intrinsic::ID IID = matchFunnelShift(P0, ShVal0, ShVal1, ShAmt);
  if (IID == Intrinsic::not_intrinsic ||
      (IID == Intrinsic::fshl && ShVal0 != P1) ||
      (IID == Intrinsic::fshr && ShVal1 != P1)) {
    IID = matchFunnelShift(P1, ShVal0, ShVal1, ShAmt);
    if (IID == Intrinsic::not_intrinsic ||
        (IID == Intrinsic::fshl && ShVal0 != P0) ||
        (IID == Intrinsic::fshr && ShVal1 != P0))
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
    std::swap(FunnelOp, GuardOp);
  }

  // The incoming block with our source operand must be the "guard" block.
  // That must contain a cmp+branch to avoid the funnel/rotate when the shift
  // amount is equal to 0. The other incoming block is the block with the
  // funnel/rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(GuardOp);
  BasicBlock *FunnelBB = Phi.getIncomingBlock(FunnelOp);
  Instruction *TermI = GuardBB->getTerminator();

  // Ensure that the shift values dominate each block.
  if (!DT.dominates(ShVal0, TermI) || !DT.dominates(ShVal1, TermI))
    return false;

  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_SpecificICmp(CmpInst::ICMP_EQ, m_Specific(ShAmt),
                                        m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(FunnelBB))))
    return false;

  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());

  if (ShVal0 == ShVal1)
    ++NumGuardedRotates;
  else
    ++NumGuardedFunnelShifts;

  // If this is not a rotate then the select was blocking poison from the
  // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
  bool IsFshl = IID == Intrinsic::fshl;
  if (ShVal0 != ShVal1) {
    if (IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal1))
      ShVal1 = Builder.CreateFreeze(ShVal1);
    else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(ShVal0))
      ShVal0 = Builder.CreateFreeze(ShVal0);
  }

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %ShAmt, 0
  //   br i1 %cmp, label %PhiBB, label %FunnelBB
  // FunnelBB:
  //   %sub = sub i32 32, %ShAmt
  //   %shr = lshr i32 %ShVal1, %sub
  //   %shl = shl i32 %ShVal0, %ShAmt
  //   %fsh = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
  Phi.replaceAllUsesWith(
      Builder.CreateIntrinsic(IID, Phi.getType(), {ShVal0, ShVal1, ShAmt}));
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a
/// chain of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
namespace {
struct MaskOps {
  Value *Root = nullptr;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1 = false;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Mask(APInt::getZero(BitWidth)), MatchAndChain(MatchAnds) {}
};
} // namespace

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common
/// source value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
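/// The mask encodes the tested bit indexes: 0x129 sets bits {0, 3, 5, 8} and
/// 0x12 sets bits {1, 4}. (An illustrative restatement of the examples above;
/// a bare, unshifted X contributes bit 0.)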
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  const APInt *BitIndex = nullptr;
  if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // The shift constant is out-of-range? This code hasn't been simplified.
  if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
///   and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
///   and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
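///
/// For example (an illustrative sketch, not from the original comment):
///   %s = lshr i32 %x, 3
///   %o = or i32 %x, %s
///   %r = and i32 %o, 1
/// becomes
///   %m = and i32 %x, 9        ; 9 == (1 << 0) | (1 << 3)
///   %c = icmp ne i32 %m, 0
///   %r = zext i1 %c to i32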
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // final "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  bool MatchTrunc;
  Value *X;
  if (I.getType()->isIntOrIntVectorTy(1)) {
    if (match(&I, m_Trunc(m_OneUse(m_And(m_Value(), m_Value())))))
      MatchAllBitsSet = true;
    else if (match(&I, m_Trunc(m_OneUse(m_Or(m_Value(), m_Value())))))
      MatchAllBitsSet = false;
    else
      return false;
    MatchTrunc = true;
    X = I.getOperand(0);
  } else {
    if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value()))) {
      X = &I;
      MatchAllBitsSet = true;
    } else if (match(&I,
                     m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One()))) {
      X = I.getOperand(0);
      MatchAllBitsSet = false;
    } else
      return false;
    MatchTrunc = false;
  }
  Type *Ty = X->getType();

  MaskOps MOps(Ty->getScalarSizeInBits(), MatchAllBitsSet);
  if (!matchAndOrChain(X, MOps) ||
      (MatchAllBitsSet && !MatchTrunc && !MOps.FoundAnd1))
    return false;

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(Ty, MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = MatchTrunc ? Cmp : Builder.CreateZExt(Cmp, Ty);
  I.replaceAllUsesWith(Zext);
  ++NumAnyOrAllBitsSet;
  return true;
}

// Try to recognize the function below as a popcount intrinsic.
// This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// Also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) &&
      match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0, m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                                    m_Deferred(ShiftOp0)),
                            m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        const APInt *AndMask;
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_APInt(AndMask)))) {
          auto CheckAndMask = [&]() {
            if (*AndMask == Mask55)
              return true;

            // Exact match failed, see if any bits are known to be 0 where we
            // expect a 1 in the mask.
            if (!AndMask->isSubsetOf(Mask55))
              return false;

            APInt NeededMask = Mask55 & ~*AndMask;
            return MaskedValueIsZero(cast<Instruction>(SubOp1)->getOperand(0),
                                     NeededMask,
                                     SimplifyQuery(I.getDataLayout()));
          };

          if (CheckAndMask()) {
            LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
            IRBuilder<> Builder(&I);
            I.replaceAllUsesWith(
                Builder.CreateIntrinsic(Intrinsic::ctpop, I.getType(), {Root}));
            ++NumPopCountRecognized;
            return true;
          }
        }
      }
    }
  }

  return false;
}

// Try to recognize either function below as a popcount intrinsic.
// Ref. Hacker's Delight.
// int popcnt(unsigned x) {
//   x = x - ((x >> 1) & 0x55555555);
//   x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
//   x = (x + (x >> 4)) & 0x0F0F0F0F;
//   x = x + (x >> 8);
//   x = x + (x >> 16);
//   return x & 0x0000003F;
// }
//
// int popcnt(unsigned x) {
//   x = x - ((x >> 1) & 0x55555555);
//   x = x - 3*((x >> 2) & 0x33333333);
//   x = (x + (x >> 4)) & 0x0F0F0F0F;
//   x = x + (x >> 8);
//   x = x + (x >> 16);
//   return x & 0x0000003F;
// }

static bool tryToRecognizePopCountHackersDelight(Instruction &I) {
  if (I.getOpcode() != Instruction::And)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();

  if (Len > 64 || Len <= 8 || Len % 8 != 0)
    return false;

  // Len should be a power of 2 for the loop to work correctly
  if (!isPowerOf2_32(Len))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));

  APInt MaskRes = APInt(Len, 2 * Len - 1);

  Value *Add1;
  if (!match(&I, m_And(m_Value(Add1), m_SpecificInt(MaskRes))))
    return false;

  Value *Add2;
  for (unsigned I = Len; I >= 16; I = I / 2) {
    // Matching "x = x + (x >> I/2)" for I-bit.
    if (!match(Add1, m_c_Add(m_LShr(m_Value(Add2), m_SpecificInt(I / 2)),
                             m_Deferred(Add2))))
      return false;
    Add1 = Add2;
  }

  Value *And1 = Add1;
  // Matching "x = (x + (x >> 4)) & 0x0F0F0F0F".
  if (!match(And1, m_And(m_c_Add(m_LShr(m_Value(Add2), m_SpecificInt(4)),
                                 m_Deferred(Add2)),
                         m_SpecificInt(Mask0F))))
    return false;

  Value *Sub1;
  llvm::APInt NegThree(/*BitWidth=*/Len, /*Value=*/-3,
                       /*isSigned=*/true);
  // Matching "x = (x & 0x33333333) + ((x >> 2) & 0x33333333)".
  if (!match(Add2, m_c_Add(m_And(m_LShr(m_Value(Sub1), m_SpecificInt(2)),
                                 m_SpecificInt(Mask33)),
                           m_And(m_Deferred(Sub1), m_SpecificInt(Mask33)))) &&
      // Matching "x = x - 3*((x >> 2) & 0x33333333)".
      !match(Add2, m_c_Add(m_Mul(m_And(m_LShr(m_Value(Sub1), m_SpecificInt(2)),
                                       m_SpecificInt(Mask33)),
                                 m_SpecificInt(NegThree)),
                           m_Deferred(Sub1))))
    return false;

  Value *Root;
  // Matching "x = x - ((x >> 1) & 0x55555555)".
  if (!match(Sub1, m_Sub(m_Value(Root),
                         m_And(m_LShr(m_Deferred(Root), m_SpecificInt(1)),
                               m_SpecificInt(Mask55)))))
    return false;

  LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
  IRBuilder<> Builder(&I);
  I.replaceAllUsesWith(
      Builder.CreateIntrinsic(Intrinsic::ctpop, I.getType(), {Root}));
  ++NumPopCountRecognized;
  return true;
}

/// Fold smin(smax(fptosi(x), C1), C2) to llvm.fptosi.sat(x), provided C1 and
/// C2 saturate the value of the fp conversion. The transform is not
/// reversible, as the fptosi.sat is more defined than the input - all inputs
/// produce a valid value for the fptosi.sat, whereas some inputs that were out
/// of range of the integer conversion produce poison in the original. The
/// reversed pattern may use fmax and fmin instead. As we cannot directly
/// reverse the transform, and it is not always profitable, we make it
/// conditional on the cost being reported as lower by TTI.
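///
/// For example (illustrative): smin(smax(fptosi float %x to i32, -128), 127)
/// clamps to the i8 range, so it can become
///   %sat = call i8 @llvm.fptosi.sat.i8.f32(float %x)
///   %res = sext i8 %sat to i32
/// when TTI reports the saturating conversion as cheaper.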
static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
  // Look for smin/smax of an fptosi, converting to fptosi_sat.
  Value *In;
  const APInt *MinC, *MaxC;
  if (!match(&I, m_SMax(m_OneUse(m_SMin(m_OneUse(m_FPToSI(m_Value(In))),
                                        m_APInt(MinC))),
                        m_APInt(MaxC))) &&
      !match(&I, m_SMin(m_OneUse(m_SMax(m_OneUse(m_FPToSI(m_Value(In))),
                                        m_APInt(MaxC))),
                        m_APInt(MinC))))
    return false;

  // Check that the constants clamp a saturate.
  if (!(*MinC + 1).isPowerOf2() || -*MaxC != *MinC + 1)
    return false;

  Type *IntTy = I.getType();
  Type *FpTy = In->getType();
  Type *SatTy =
      IntegerType::get(IntTy->getContext(), (*MinC + 1).exactLogBase2() + 1);
  if (auto *VecTy = dyn_cast<VectorType>(IntTy))
    SatTy = VectorType::get(SatTy, VecTy->getElementCount());

  // Get the cost of the intrinsic, and check that against the cost of
  // fptosi+smin+smax
  InstructionCost SatCost = TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::fptosi_sat, SatTy, {In}, {FpTy}),
      TTI::TCK_RecipThroughput);
  SatCost += TTI.getCastInstrCost(Instruction::SExt, IntTy, SatTy,
                                  TTI::CastContextHint::None,
                                  TTI::TCK_RecipThroughput);

  InstructionCost MinMaxCost = TTI.getCastInstrCost(
      Instruction::FPToSI, IntTy, FpTy, TTI::CastContextHint::None,
      TTI::TCK_RecipThroughput);
  MinMaxCost += TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::smin, IntTy, {IntTy}),
      TTI::TCK_RecipThroughput);
  MinMaxCost += TTI.getIntrinsicInstrCost(
      IntrinsicCostAttributes(Intrinsic::smax, IntTy, {IntTy}),
      TTI::TCK_RecipThroughput);

  if (SatCost >= MinMaxCost)
    return false;

  IRBuilder<> Builder(&I);
  Value *Sat =
      Builder.CreateIntrinsic(Intrinsic::fptosi_sat, {SatTy, FpTy}, In);
  I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
  return true;
}

/// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids
/// pessimistic codegen that has to account for setting errno and can enable
/// vectorization.
static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
                     TargetLibraryInfo &TLI, AssumptionCache &AC,
                     DominatorTree &DT) {
  // If (1) this is a sqrt libcall, (2) we can assume that NAN is not created
  // (because NNAN or the operand arg must not be less than -0.0), and (3) we
  // would not end up lowering to a libcall anyway (which could change the
  // value of errno), then:
  // (1) errno won't be set.
  // (2) it is safe to convert this to an intrinsic call.
  Type *Ty = Call->getType();
  Value *Arg = Call->getArgOperand(0);
  if (TTI.haveFastSqrt(Ty) &&
      (Call->hasNoNaNs() ||
       cannotBeOrderedLessThanZero(
           Arg, SimplifyQuery(Call->getDataLayout(), &TLI, &DT, &AC, Call)))) {
    IRBuilder<> Builder(Call);
    Value *NewSqrt =
        Builder.CreateIntrinsic(Intrinsic::sqrt, Ty, Arg, Call, "sqrt");
    Call->replaceAllUsesWith(NewSqrt);

    // Explicitly erase the old call because a call with side effects is not
    // trivially dead.
    Call->eraseFromParent();
    return true;
  }

  return false;
}

// Check if this array of constants represents a cttz table.
// Iterate over the elements from \p Table by trying to find/match all
// the numbers from 0 to \p InputBits that should represent cttz results.
static bool isCTTZTable(Constant *Table, const APInt &Mul, const APInt &Shift,
                        const APInt &AndMask, Type *AccessTy,
                        unsigned InputBits, const APInt &GEPIdxFactor,
                        const DataLayout &DL) {
  for (unsigned Idx = 0; Idx < InputBits; Idx++) {
    APInt Index =
        (APInt::getOneBitSet(InputBits, Idx) * Mul).lshr(Shift) & AndMask;
    auto *C = dyn_cast_or_null<ConstantInt>(
        ConstantFoldLoadFromConst(Table, AccessTy, Index * GEPIdxFactor, DL));
    if (!C || C->getValue() != Idx)
      return false;
  }

  return true;
}

// Try to recognize table-based ctz implementation.
// E.g., an example in C (for more cases please see the llvm/tests):
// int f(unsigned x) {
//   static const char table[32] =
//     {0, 1, 28, 2, 29, 14, 24, 3, 30,
//      22, 20, 15, 25, 17, 4, 8, 31, 27,
//      13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};
//   return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27];
// }
// this can be lowered to `cttz` instruction.
// There is also a special case when the element is 0.
//
// The (x & -x) sets the lowest non-zero bit to 1. The multiply is a de-bruijn
// sequence that contains each pattern of bits in it. The shift extracts
// the top bits after the multiply, and that index into the table should
// represent the number of trailing zeros in the original number.
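//
// Worked example (illustrative, not from the original comment): for x = 8,
// (x & -x) == 8 and (8 * 0x077CB531) >> 27 == 7, so the load reads
// table[7] == 3 == cttz(8).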
//
// Here are some examples of LLVM IR for a 64-bit target:
//
// CASE 1:
// %sub = sub i32 0, %x
// %and = and i32 %sub, %x
// %mul = mul i32 %and, 125613361
// %shr = lshr i32 %mul, 27
// %idxprom = zext i32 %shr to i64
// %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0,
//             i64 %idxprom
// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
//
// CASE 2:
// %sub = sub i32 0, %x
// %and = and i32 %sub, %x
// %mul = mul i32 %and, 72416175
// %shr = lshr i32 %mul, 26
// %idxprom = zext i32 %shr to i64
// %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table,
//             i64 0, i64 %idxprom
// %0 = load i16, i16* %arrayidx, align 2, !tbaa !8
//
// CASE 3:
// %sub = sub i32 0, %x
// %and = and i32 %sub, %x
// %mul = mul i32 %and, 81224991
// %shr = lshr i32 %mul, 27
// %idxprom = zext i32 %shr to i64
// %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table,
//             i64 0, i64 %idxprom
// %0 = load i32, i32* %arrayidx, align 4, !tbaa !8
//
// CASE 4:
// %sub = sub i64 0, %x
// %and = and i64 %sub, %x
// %mul = mul i64 %and, 283881067100198605
// %shr = lshr i64 %mul, 58
// %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @table, i64 0,
//             i64 %shr
// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
//
// All these can be lowered to @llvm.cttz.i32/64 intrinsics.
static bool tryToRecognizeTableBasedCttz(Instruction &I, const DataLayout &DL) {
  LoadInst *LI = dyn_cast<LoadInst>(&I);
  if (!LI)
    return false;

  Type *AccessType = LI->getType();
  if (!AccessType->isIntegerTy())
    return false;

  auto *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
  if (!GEP || !GEP->hasNoUnsignedSignedWrap())
    return false;

  GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
  if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
    return false;

  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt ModOffset(BW, 0);
  SmallMapVector<Value *, APInt, 4> VarOffsets;
  if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||
      VarOffsets.size() != 1 || ModOffset != 0)
    return false;
  auto [GepIdx, GEPScale] = VarOffsets.front();

  Value *X1;
  const APInt *MulConst, *ShiftConst, *AndCst = nullptr;
  // Check that the gep variable index is ((x & -x) * MulConst) >> ShiftConst.
  // This might be extended to the pointer index type, and if the gep index type
  // has been replaced with an i8 then a new And (and different ShiftConst) will
  // be present.
  auto MatchInner = m_LShr(
      m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)), m_APInt(MulConst)),
      m_APInt(ShiftConst));
  if (!match(GepIdx, m_CastOrSelf(MatchInner)) &&
      !match(GepIdx, m_CastOrSelf(m_And(MatchInner, m_APInt(AndCst)))))
    return false;

  unsigned InputBits = X1->getType()->getScalarSizeInBits();
  if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)
    return false;

  if (!GEPScale.isIntN(InputBits) ||
      !isCTTZTable(GVTable->getInitializer(), *MulConst, *ShiftConst,
                   AndCst ? *AndCst : APInt::getAllOnes(InputBits), AccessType,
                   InputBits, GEPScale.zextOrTrunc(InputBits), DL))
    return false;

  ConstantInt *ZeroTableElem = cast<ConstantInt>(
      ConstantFoldLoadFromConst(GVTable->getInitializer(), AccessType, DL));
  bool DefinedForZero = ZeroTableElem->getZExtValue() == InputBits;

  IRBuilder<> B(LI);
  ConstantInt *BoolConst = B.getInt1(!DefinedForZero);
  Type *XType = X1->getType();
  auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst});
  Value *ZExtOrTrunc = nullptr;

  if (DefinedForZero) {
    ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, AccessType);
  } else {
    // If the value in elem 0 isn't the same as InputBits, we still want to
    // produce the value from the table.
    auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));
    auto Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Cttz);

    // The true branch of select handles the cttz(0) case, which is rare.
    if (auto *SelectI = dyn_cast<SelectInst>(Select);
        SelectI && !ProfcheckDisableMetadataFixes) {
      SelectI->setMetadata(
          LLVMContext::MD_prof,
          MDBuilder(SelectI->getContext()).createUnlikelyBranchWeights());
    }

    // NOTE: If the table[0] is 0, but the cttz(0) is defined by the Target
    // it should be handled as: `cttz(x) & (typeSize - 1)`.

    ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);
  }

  LI->replaceAllUsesWith(ZExtOrTrunc);

  return true;
}

// Check if this array of constants represents a log2 table.
// Iterate over the elements from \p Table by trying to find/match all
// the numbers from 0 to \p InputBits that should represent log2 results.
static bool isLog2Table(Constant *Table, const APInt &Mul, const APInt &Shift,
                        Type *AccessTy, unsigned InputBits,
                        const APInt &GEPIdxFactor, const DataLayout &DL) {
  for (unsigned Idx = 0; Idx < InputBits; Idx++) {
    APInt Index = (APInt::getLowBitsSet(InputBits, Idx + 1) * Mul).lshr(Shift);
    auto *C = dyn_cast_or_null<ConstantInt>(
        ConstantFoldLoadFromConst(Table, AccessTy, Index * GEPIdxFactor, DL));
    if (!C || C->getValue() != Idx)
      return false;
  }

  // Verify that an input of zero will select table index 0.
  APInt ZeroIndex = Mul.lshr(Shift);
  if (!ZeroIndex.isZero())
    return false;

  return true;
}

// Try to recognize table-based log2 implementation.
// E.g., an example in C (for more cases please see the llvm/tests):
// int f(unsigned v) {
//   static const char table[32] =
//     {0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
//      8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31};
//
//   v |= v >> 1; // first round down to one less than a power of 2
//   v |= v >> 2;
//   v |= v >> 4;
//   v |= v >> 8;
//   v |= v >> 16;
//
//   return table[(unsigned)(v * 0x07C4ACDDU) >> 27];
// }
// this can be lowered to `ctlz` instruction.
// There is also a special case when the element is 0.
//
// The >> and |= sequence sets all bits below the most significant set bit. The
// multiply is a de-bruijn sequence that contains each pattern of bits in it.
// The shift extracts the top bits after the multiply, and that index into the
// table should represent the floor log base 2 of the original number.
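//
// Worked example (illustrative, not from the original comment): for v = 4,
// the shift/or cascade produces 7, and (7 * 0x07C4ACDD) >> 27 == 6, so the
// load reads table[6] == 2 == log2(4).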
//
// Here are some examples of LLVM IR for a 64-bit target.
//
// CASE 1:
// %shr = lshr i32 %v, 1
// %or = or i32 %shr, %v
// %shr1 = lshr i32 %or, 2
// %or2 = or i32 %shr1, %or
// %shr3 = lshr i32 %or2, 4
// %or4 = or i32 %shr3, %or2
// %shr5 = lshr i32 %or4, 8
// %or6 = or i32 %shr5, %or4
// %shr7 = lshr i32 %or6, 16
// %or8 = or i32 %shr7, %or6
// %mul = mul i32 %or8, 130329821
// %shr9 = lshr i32 %mul, 27
// %idxprom = zext nneg i32 %shr9 to i64
// %arrayidx = getelementptr inbounds i8, ptr @table, i64 %idxprom
// %0 = load i8, ptr %arrayidx, align 1
//
// CASE 2:
// %shr = lshr i64 %v, 1
// %or = or i64 %shr, %v
// %shr1 = lshr i64 %or, 2
// %or2 = or i64 %shr1, %or
// %shr3 = lshr i64 %or2, 4
// %or4 = or i64 %shr3, %or2
// %shr5 = lshr i64 %or4, 8
// %or6 = or i64 %shr5, %or4
// %shr7 = lshr i64 %or6, 16
// %or8 = or i64 %shr7, %or6
// %shr9 = lshr i64 %or8, 32
// %or10 = or i64 %shr9, %or8
// %mul = mul i64 %or10, 285870213051386505
// %shr11 = lshr i64 %mul, 58
// %arrayidx = getelementptr inbounds i8, ptr @table, i64 %shr11
// %0 = load i8, ptr %arrayidx, align 1
//
// All these can be lowered to @llvm.ctlz.i32/64 intrinsics and a subtract.
static bool tryToRecognizeTableBasedLog2(Instruction &I, const DataLayout &DL,
                                         TargetTransformInfo &TTI) {
  LoadInst *LI = dyn_cast<LoadInst>(&I);
  if (!LI)
    return false;

  Type *AccessType = LI->getType();
  if (!AccessType->isIntegerTy())
    return false;

  auto *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
  if (!GEP || !GEP->hasNoUnsignedSignedWrap())
    return false;

  GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
  if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
    return false;

  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt ModOffset(BW, 0);
  SmallMapVector<Value *, APInt, 4> VarOffsets;
  if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||
      VarOffsets.size() != 1 || ModOffset != 0)
    return false;
  auto [GepIdx, GEPScale] = VarOffsets.front();

  Value *X;
  const APInt *MulConst, *ShiftConst;
  // Check that the gep variable index is (x * MulConst) >> ShiftConst.
  auto MatchInner =
      m_LShr(m_Mul(m_Value(X), m_APInt(MulConst)), m_APInt(ShiftConst));
  if (!match(GepIdx, m_CastOrSelf(MatchInner)))
    return false;

  unsigned InputBits = X->getType()->getScalarSizeInBits();
  if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)
    return false;

  // Verify shift amount.
  // TODO: Allow other shift amounts when we have proper test coverage.
  if (*ShiftConst != InputBits - Log2_32(InputBits))
    return false;

  // Match the sequence of OR operations with right shifts by powers of 2.
  for (unsigned ShiftAmt = InputBits / 2; ShiftAmt != 0; ShiftAmt /= 2) {
    Value *Y;
    if (!match(X, m_c_Or(m_LShr(m_Value(Y), m_SpecificInt(ShiftAmt)),
                         m_Deferred(Y))))
      return false;
    X = Y;
  }

  if (!GEPScale.isIntN(InputBits) ||
      !isLog2Table(GVTable->getInitializer(), *MulConst, *ShiftConst,
                   AccessType, InputBits, GEPScale.zextOrTrunc(InputBits), DL))
    return false;

  ConstantInt *ZeroTableElem = cast<ConstantInt>(
      ConstantFoldLoadFromConst(GVTable->getInitializer(), AccessType, DL));

  // Use InputBits - 1 - ctlz(X) to compute log2(X).
  IRBuilder<> B(LI);
  ConstantInt *BoolConst = B.getTrue();
  Type *XType = X->getType();

  // Check that the backend has an efficient ctlz instruction.
  // FIXME: Teach the backend to emit the original code when ctlz isn't
  // supported like we do for cttz.
  IntrinsicCostAttributes Attrs(
      Intrinsic::ctlz, XType,
      {PoisonValue::get(XType), /*is_zero_poison=*/BoolConst});
  InstructionCost Cost =
      TTI.getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency);
  if (Cost > TargetTransformInfo::TCC_Basic)
    return false;

  Value *Ctlz = B.CreateIntrinsic(Intrinsic::ctlz, {XType}, {X, BoolConst});

  Constant *InputBitsM1 = ConstantInt::get(XType, InputBits - 1);
  Value *Sub = B.CreateSub(InputBitsM1, Ctlz);

  // The table won't produce a sensible result for 0.
  Value *Cmp = B.CreateICmpEQ(X, ConstantInt::get(XType, 0));
  Value *Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Sub);

  // The true branch of select handles the log2(0) case, which is rare.
  if (auto *SelectI = dyn_cast<SelectInst>(Select);
      SelectI && !ProfcheckDisableMetadataFixes) {
    SelectI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(SelectI->getContext()).createUnlikelyBranchWeights());
  }

  Value *ZExtOrTrunc = B.CreateZExtOrTrunc(Select, AccessType);

  LI->replaceAllUsesWith(ZExtOrTrunc);

  return true;
}

/// This is used by foldLoadsRecursive() to capture a Root Load node which is
/// of type or(load, load) and recursively build the wide load. Also capture
/// the shift amount, zero extend type and loadSize.
struct LoadOps {
  LoadInst *Root = nullptr;
  LoadInst *RootInsert = nullptr;
  bool FoundRoot = false;
  uint64_t LoadSize = 0;
  uint64_t Shift = 0;
  Type *ZextType;
  AAMDNodes AATags;
};

// Identify and merge consecutive loads recursively, which are of the form:
// (ZExt(L1) << shift1) | (ZExt(L2) << shift2) -> ZExt(L3) << shift1
// (ZExt(L1) << shift1) | ZExt(L2)             -> ZExt(L3)
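//
// For example (an illustrative little-endian sketch):
//   %l1 = load i8, ptr %p
//   %l2 = load i8, ptr %p1        ; %p1 = getelementptr i8, ptr %p, i64 1
//   %e1 = zext i8 %l1 to i16
//   %e2 = zext i8 %l2 to i16
//   %s2 = shl i16 %e2, 8
//   %or = or i16 %e1, %s2
// merges into a single "load i16, ptr %p".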
static bool foldLoadsRecursive(Value *V, LoadOps &LOps, const DataLayout &DL,
                               AliasAnalysis &AA, bool IsRoot = false) {
  uint64_t ShAmt2;
  Value *X;
  Instruction *L1, *L2;

  // For the root instruction, allow multiple uses since the final result
  // may legitimately be used in multiple places. For intermediate values,
  // require a single use to avoid creating duplicate loads.
  if (!IsRoot && !V->hasOneUse())
    return false;

  if (!match(V, m_c_Or(m_Value(X),
                       m_OneUse(m_ShlOrSelf(m_OneUse(m_ZExt(m_Instruction(L2))),
                                            ShAmt2)))))
    return false;

  if (!foldLoadsRecursive(X, LOps, DL, AA, /*IsRoot=*/false) && LOps.FoundRoot)
    // Avoid Partial chain merge.
    return false;

  // Check if the pattern has loads
  LoadInst *LI1 = LOps.Root;
  uint64_t ShAmt1 = LOps.Shift;
  if (LOps.FoundRoot == false &&
      match(X, m_OneUse(
                   m_ShlOrSelf(m_OneUse(m_ZExt(m_Instruction(L1))), ShAmt1)))) {
    LI1 = dyn_cast<LoadInst>(L1);
  }
  LoadInst *LI2 = dyn_cast<LoadInst>(L2);

  // Check that the loads are the same, simple (non-atomic, non-volatile), and
  // have the same address space.
  if (LI1 == LI2 || !LI1 || !LI2 || !LI1->isSimple() || !LI2->isSimple() ||
      LI1->getPointerAddressSpace() != LI2->getPointerAddressSpace())
    return false;

  // Check if loads come from the same BB.
  if (LI1->getParent() != LI2->getParent())
    return false;

  // Find the data layout
  bool IsBigEndian = DL.isBigEndian();

  // Check if loads are consecutive and same size.
  Value *Load1Ptr = LI1->getPointerOperand();
  APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
  Load1Ptr =
      Load1Ptr->stripAndAccumulateConstantOffsets(DL, Offset1,
                                                  /* AllowNonInbounds */ true);

  Value *Load2Ptr = LI2->getPointerOperand();
  APInt Offset2(DL.getIndexTypeSizeInBits(Load2Ptr->getType()), 0);
  Load2Ptr =
      Load2Ptr->stripAndAccumulateConstantOffsets(DL, Offset2,
                                                  /* AllowNonInbounds */ true);

  // Verify that both loads have the same base pointer.
  uint64_t LoadSize1 = LI1->getType()->getPrimitiveSizeInBits();
  uint64_t LoadSize2 = LI2->getType()->getPrimitiveSizeInBits();
  if (Load1Ptr != Load2Ptr)
    return false;

  // Make sure that there are no padding bits.
  if (!DL.typeSizeEqualsStoreSize(LI1->getType()) ||
      !DL.typeSizeEqualsStoreSize(LI2->getType()))
    return false;

  // Alias analysis to check for stores between the loads.
  LoadInst *Start = LOps.FoundRoot ? LOps.RootInsert : LI1, *End = LI2;
  MemoryLocation Loc;
  if (!Start->comesBefore(End)) {
    std::swap(Start, End);
    // If LOps.RootInsert comes after LI2, since we use LI2 as the new insert
    // point, we should make sure whether the memory region accessed by LOps
    // isn't modified.
    if (LOps.FoundRoot)
      Loc = MemoryLocation(
          LOps.Root->getPointerOperand(),
          LocationSize::precise(DL.getTypeStoreSize(
              IntegerType::get(LI1->getContext(), LOps.LoadSize))),
          LOps.AATags);
    else
      Loc = MemoryLocation::get(End);
  } else
    Loc = MemoryLocation::get(End);
  unsigned NumScanned = 0;
  for (Instruction &Inst :
       make_range(Start->getIterator(), End->getIterator())) {
    if (Inst.mayWriteToMemory() && isModSet(AA.getModRefInfo(&Inst, Loc)))
      return false;

    if (++NumScanned > MaxInstrsToScan)
      return false;
  }

  // Make sure the load with the lower offset is LI1.
  bool Reverse = false;
  if (Offset2.slt(Offset1)) {
    std::swap(LI1, LI2);
    std::swap(ShAmt1, ShAmt2);
    std::swap(Offset1, Offset2);
    std::swap(Load1Ptr, Load2Ptr);
    std::swap(LoadSize1, LoadSize2);
    Reverse = true;
  }

  // Big endian swap the shifts
  if (IsBigEndian)
    std::swap(ShAmt1, ShAmt2);

  // First load is always LI1. This is where we put the new load.
  // Use the merged load size available from LI1 for forward loads.
  if (LOps.FoundRoot) {
    if (!Reverse)
      LoadSize1 = LOps.LoadSize;
    else
      LoadSize2 = LOps.LoadSize;
  }

  // Verify that the shift amount and load index align, i.e. that the loads
  // are consecutive.
  uint64_t ShiftDiff = IsBigEndian ? LoadSize2 : LoadSize1;
  uint64_t PrevSize =
      DL.getTypeStoreSize(IntegerType::get(LI1->getContext(), LoadSize1));
  if ((ShAmt2 - ShAmt1) != ShiftDiff || (Offset2 - Offset1) != PrevSize)
    return false;

  // Update LOps
  AAMDNodes AATags1 = LOps.AATags;
  AAMDNodes AATags2 = LI2->getAAMetadata();
  if (LOps.FoundRoot == false) {
    LOps.FoundRoot = true;
    AATags1 = LI1->getAAMetadata();
  }
  LOps.LoadSize = LoadSize1 + LoadSize2;
  LOps.RootInsert = Start;

  // Concatenate the AATags of the merged loads.
  LOps.AATags = AATags1.concat(AATags2);

  LOps.Root = LI1;
  LOps.Shift = ShAmt1;
  LOps.ZextType = X->getType();
  return true;
}

// For a given BB instruction, evaluate all loads in the chain that form a
// pattern which suggests that the loads can be combined. The one and only use
// of the loads is to form a wider load.
static bool foldConsecutiveLoads(Instruction &I, const DataLayout &DL,
                                 TargetTransformInfo &TTI, AliasAnalysis &AA,
                                 const DominatorTree &DT) {
  // Only consider load chains of scalar values.
  if (isa<VectorType>(I.getType()))
    return false;

  LoadOps LOps;
  if (!foldLoadsRecursive(&I, LOps, DL, AA, /*IsRoot=*/true) || !LOps.FoundRoot)
    return false;

  IRBuilder<> Builder(&I);
  LoadInst *NewLoad = nullptr, *LI1 = LOps.Root;

  IntegerType *WiderType = IntegerType::get(I.getContext(), LOps.LoadSize);
  // TTI based checks if we want to proceed with wider load
  bool Allowed = TTI.isTypeLegal(WiderType);
  if (!Allowed)
    return false;

  unsigned AS = LI1->getPointerAddressSpace();
  unsigned Fast = 0;
  Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize,
                                               AS, LI1->getAlign(), &Fast);
  if (!Allowed || !Fast)
    return false;

  // Get the Index and Ptr for the new GEP.
  Value *Load1Ptr = LI1->getPointerOperand();
  Builder.SetInsertPoint(LOps.RootInsert);
  if (!DT.dominates(Load1Ptr, LOps.RootInsert)) {
    APInt Offset1(DL.getIndexTypeSizeInBits(Load1Ptr->getType()), 0);
    Load1Ptr = Load1Ptr->stripAndAccumulateConstantOffsets(
        DL, Offset1, /* AllowNonInbounds */ true);
    Load1Ptr = Builder.CreatePtrAdd(Load1Ptr, Builder.getInt(Offset1));
  }
  // Generate wider load.
  NewLoad = Builder.CreateAlignedLoad(WiderType, Load1Ptr, LI1->getAlign(),
                                      LI1->isVolatile(), "");
  NewLoad->takeName(LI1);
  // Set the New Load AATags Metadata.
  if (LOps.AATags)
    NewLoad->setAAMetadata(LOps.AATags);

  Value *NewOp = NewLoad;
  // Check if zero extend needed.
  if (LOps.ZextType)
    NewOp = Builder.CreateZExt(NewOp, LOps.ZextType);

  // Check if shift needed. We need to shift with the amount of load1
  // shift if not zero.
  if (LOps.Shift)
    NewOp = Builder.CreateShl(NewOp, LOps.Shift);
  I.replaceAllUsesWith(NewOp);

  return true;
}

/// ValWidth bits starting at ValOffset of Val stored at PtrBase+PtrOffset.
struct PartStore {
  Value *PtrBase;
  APInt PtrOffset;
  Value *Val;
  uint64_t ValOffset;
  uint64_t ValWidth;
  StoreInst *Store;

  bool isCompatibleWith(const PartStore &Other) const {
    return PtrBase == Other.PtrBase && Val == Other.Val;
  }

  bool operator<(const PartStore &Other) const {
    return PtrOffset.slt(Other.PtrOffset);
  }
};

static std::optional<PartStore> matchPartStore(Instruction &I,
                                               const DataLayout &DL) {
  auto *Store = dyn_cast<StoreInst>(&I);
  if (!Store || !Store->isSimple())
    return std::nullopt;

  Value *StoredVal = Store->getValueOperand();
  Type *StoredTy = StoredVal->getType();
  if (!StoredTy->isIntegerTy() || !DL.typeSizeEqualsStoreSize(StoredTy))
    return std::nullopt;

  uint64_t ValWidth = StoredTy->getPrimitiveSizeInBits();
  uint64_t ValOffset;
  Value *Val;
  if (!match(StoredVal, m_Trunc(m_LShrOrSelf(m_Value(Val), ValOffset))))
    return std::nullopt;

  Value *Ptr = Store->getPointerOperand();
  APInt PtrOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *PtrBase = Ptr->stripAndAccumulateConstantOffsets(
      DL, PtrOffset, /*AllowNonInbounds=*/true);
  return {{PtrBase, PtrOffset, Val, ValOffset, ValWidth, Store}};
}

static bool mergeConsecutivePartStores(ArrayRef<PartStore> Parts,
                                       unsigned Width, const DataLayout &DL,
                                       TargetTransformInfo &TTI) {
  if (Parts.size() < 2)
    return false;

  // Check whether combining the stores is profitable.
  // FIXME: We could generate smaller stores if we can't produce a large one.
  const PartStore &First = Parts.front();
  LLVMContext &Ctx = First.Store->getContext();
  Type *NewTy = Type::getIntNTy(Ctx, Width);
  unsigned Fast = 0;
  if (!TTI.isTypeLegal(NewTy) ||
      !TTI.allowsMisalignedMemoryAccesses(Ctx, Width,
                                          First.Store->getPointerAddressSpace(),
                                          First.Store->getAlign(), &Fast) ||
      !Fast)
    return false;

  // Generate the combined store.
  IRBuilder<> Builder(First.Store);
  Value *Val = First.Val;
  if (First.ValOffset != 0)
    Val = Builder.CreateLShr(Val, First.ValOffset);
  Val = Builder.CreateZExtOrTrunc(Val, NewTy);
  StoreInst *Store = Builder.CreateAlignedStore(
      Val, First.Store->getPointerOperand(), First.Store->getAlign());

  // Merge various metadata onto the new store.
  AAMDNodes AATags = First.Store->getAAMetadata();
  SmallVector<Instruction *> Stores = {First.Store};
  Stores.reserve(Parts.size());
  SmallVector<DebugLoc> DbgLocs = {First.Store->getDebugLoc()};
  DbgLocs.reserve(Parts.size());
  for (const PartStore &Part : drop_begin(Parts)) {
    AATags = AATags.concat(Part.Store->getAAMetadata());
    Stores.push_back(Part.Store);
    DbgLocs.push_back(Part.Store->getDebugLoc());
  }
  Store->setAAMetadata(AATags);
  Store->mergeDIAssignID(Stores);
  Store->setDebugLoc(DebugLoc::getMergedLocations(DbgLocs));

  // Remove the old stores.
  for (const PartStore &Part : Parts)
    Part.Store->eraseFromParent();

  return true;
}

static bool mergePartStores(SmallVectorImpl<PartStore> &Parts,
                            const DataLayout &DL, TargetTransformInfo &TTI) {
  if (Parts.size() < 2)
    return false;

  // We now have multiple parts of the same value stored to the same pointer.
  // Sort the parts by pointer offset, and make sure they are consistent with
  // the value offsets. Also check that the value is fully covered without
  // overlaps.
  bool Changed = false;
  llvm::sort(Parts);
  int64_t LastEndOffsetFromFirst = 0;
  const PartStore *First = &Parts[0];
  for (const PartStore &Part : Parts) {
    APInt PtrOffsetFromFirst = Part.PtrOffset - First->PtrOffset;
    int64_t ValOffsetFromFirst = Part.ValOffset - First->ValOffset;
    if (PtrOffsetFromFirst * 8 != ValOffsetFromFirst ||
        LastEndOffsetFromFirst != ValOffsetFromFirst) {
      Changed |= mergeConsecutivePartStores(ArrayRef(First, &Part),
                                            LastEndOffsetFromFirst, DL, TTI);
      First = &Part;
      LastEndOffsetFromFirst = Part.ValWidth;
      continue;
    }

    LastEndOffsetFromFirst = ValOffsetFromFirst + Part.ValWidth;
  }

  Changed |= mergeConsecutivePartStores(ArrayRef(First, Parts.end()),
                                        LastEndOffsetFromFirst, DL, TTI);
  return Changed;
}

static bool foldConsecutiveStores(BasicBlock &BB, const DataLayout &DL,
                                  TargetTransformInfo &TTI,
                                  AliasAnalysis &AA) {
  // FIXME: Add big endian support.
  if (DL.isBigEndian())
    return false;

  BatchAAResults BatchAA(AA);
  SmallVector<PartStore, 8> Parts;
  bool MadeChange = false;
  for (Instruction &I : make_early_inc_range(BB)) {
    if (std::optional<PartStore> Part = matchPartStore(I, DL)) {
      if (Parts.empty() || Part->isCompatibleWith(Parts[0])) {
        Parts.push_back(std::move(*Part));
        continue;
      }

      MadeChange |= mergePartStores(Parts, DL, TTI);
      Parts.clear();
      Parts.push_back(std::move(*Part));
      continue;
    }

    if (Parts.empty())
      continue;

    if (I.mayThrow() ||
        (I.mayReadOrWriteMemory() &&
         isModOrRefSet(BatchAA.getModRefInfo(
             &I, MemoryLocation::getBeforeOrAfter(Parts[0].PtrBase))))) {
      MadeChange |= mergePartStores(Parts, DL, TTI);
      Parts.clear();
      continue;
    }
  }

  MadeChange |= mergePartStores(Parts, DL, TTI);
  return MadeChange;
}

/// Combine away instructions provided they are still equivalent when compared
/// against 0 (i.e. whether they have any bits set).
static Value *optimizeShiftInOrChain(Value *V, IRBuilder<> &Builder) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I || I->getOpcode() != Instruction::Or || !I->hasOneUse())
    return nullptr;

  Value *A;

  // Look deeper into the chain of or's, combining away shl (so long as they
  // are nuw or nsw).
  Value *Op0 = I->getOperand(0);
  if (match(Op0, m_CombineOr(m_NSWShl(m_Value(A), m_Value()),
                             m_NUWShl(m_Value(A), m_Value()))))
    Op0 = A;
  else if (auto *NOp = optimizeShiftInOrChain(Op0, Builder))
    Op0 = NOp;

  Value *Op1 = I->getOperand(1);
  if (match(Op1, m_CombineOr(m_NSWShl(m_Value(A), m_Value()),
                             m_NUWShl(m_Value(A), m_Value()))))
    Op1 = A;
  else if (auto *NOp = optimizeShiftInOrChain(Op1, Builder))
    Op1 = NOp;

  if (Op0 != I->getOperand(0) || Op1 != I->getOperand(1))
    return Builder.CreateOr(Op0, Op1);
  return nullptr;
}

static bool foldICmpOrChain(Instruction &I, const DataLayout &DL,
                            TargetTransformInfo &TTI, AliasAnalysis &AA,
                            const DominatorTree &DT) {
  CmpPredicate Pred;
  Value *Op0;
  if (!match(&I, m_ICmp(Pred, m_Value(Op0), m_Zero())) ||
      !ICmpInst::isEquality(Pred))
    return false;

  // If the chain of or's matches a load, combine to that before attempting to
  // remove shifts.
  if (auto OpI = dyn_cast<Instruction>(Op0))
    if (OpI->getOpcode() == Instruction::Or)
      if (foldConsecutiveLoads(*OpI, DL, TTI, AA, DT))
        return true;

  IRBuilder<> Builder(&I);
  // icmp eq/ne or(shl(a), b), 0 -> icmp eq/ne or(a, b), 0
  if (auto *Res = optimizeShiftInOrChain(Op0, Builder)) {
    I.replaceAllUsesWith(Builder.CreateICmp(Pred, Res, I.getOperand(1)));
    return true;
  }

  return false;
}

// Calculate GEP Stride and accumulated const ModOffset. Return Stride and
// ModOffset.
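//
// For example (illustrative): for a gep of the form
//   getelementptr inbounds [64 x i32], ptr @g, i64 0, i64 %i
// the only variable index scale is 4, so Stride == 4; an additional constant
// byte offset of 6 would leave ModOffset == 6 srem 4 == 2.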
static std::pair<APInt, APInt>
getStrideAndModOffsetOfGEP(Value *PtrOp, const DataLayout &DL) {
  unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
  std::optional<APInt> Stride;
  APInt ModOffset(BW, 0);
  // Return a minimum gep stride, the greatest common divisor of consecutive
  // gep index scales (c.f. Bézout's identity).
  while (auto *GEP = dyn_cast<GEPOperator>(PtrOp)) {
    SmallMapVector<Value *, APInt, 4> VarOffsets;
    if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset))
      break;

    for (auto [V, Scale] : VarOffsets) {
      // Only keep a power of two factor for non-inbounds
      if (!GEP->hasNoUnsignedSignedWrap())
        Scale = APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

      if (!Stride)
        Stride = Scale;
      else
        Stride = APIntOps::GreatestCommonDivisor(*Stride, Scale);
    }

    PtrOp = GEP->getPointerOperand();
  }

  // Check whether the pointer arrives back at a global variable via at least
  // one GEP. Even if it doesn't, we can check by alignment.
  if (!isa<GlobalVariable>(PtrOp) || !Stride)
    return {APInt(BW, 1), APInt(BW, 0)};

  // In consideration of signed GEP indices, the non-negligible offset becomes
  // the remainder of division by the minimum GEP stride.
  ModOffset = ModOffset.srem(*Stride);
  if (ModOffset.isNegative())
    ModOffset += *Stride;

  return {*Stride, ModOffset};
}

/// If C is a constant patterned array and all valid loaded results for the
/// given alignment are the same constant, return that constant.
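///
/// For example (illustrative): with
///   @g = constant [8 x i8] c"\05\05\05\05\05\05\05\05"
/// every in-bounds i8 load of @g yields 5, so such a load folds to the
/// constant 5 regardless of the runtime index.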
static bool foldPatternedLoads(Instruction &I, const DataLayout &DL) {
  auto *LI = dyn_cast<LoadInst>(&I);
  if (!LI || LI->isVolatile())
    return false;

  // We can only fold the load if it is from a constant global with a
  // definitive initializer. Skip expensive logic if this is not the case.
  auto *PtrOp = LI->getPointerOperand();
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Bail for large initializers in excess of 4K to avoid too many scans.
  Constant *C = GV->getInitializer();
  uint64_t GVSize = DL.getTypeAllocSize(C->getType());
  if (!GVSize || 4096 < GVSize)
    return false;

  Type *LoadTy = LI->getType();
  unsigned BW = DL.getIndexTypeSizeInBits(PtrOp->getType());
  auto [Stride, ConstOffset] = getStrideAndModOffsetOfGEP(PtrOp, DL);

  // Any possible offset is a multiple of the GEP stride, and any valid offset
  // is a multiple of the load alignment, so checking only multiples of the
  // bigger of the two is sufficient to establish the results' equality.
  if (auto LA = LI->getAlign();
      LA <= GV->getAlign().valueOrOne() && Stride.getZExtValue() < LA.value()) {
    ConstOffset = APInt(BW, 0);
    Stride = APInt(BW, LA.value());
  }

  Constant *Ca = ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL);
  if (!Ca)
    return false;

  unsigned E = GVSize - DL.getTypeStoreSize(LoadTy);
  for (; ConstOffset.getZExtValue() <= E; ConstOffset += Stride)
    if (Ca != ConstantFoldLoadFromConst(C, LoadTy, ConstOffset, DL))
      return false;

  I.replaceAllUsesWith(Ca);

  return true;
}

namespace {
class StrNCmpInliner {
public:
  StrNCmpInliner(CallInst *CI, LibFunc Func, DomTreeUpdater *DTU,
                 const DataLayout &DL)
      : CI(CI), Func(Func), DTU(DTU), DL(DL) {}

  bool optimizeStrNCmp();

private:
  void inlineCompare(Value *LHS, StringRef RHS, uint64_t N, bool Swapped);

  CallInst *CI;
  LibFunc Func;
  DomTreeUpdater *DTU;
  const DataLayout &DL;
};

} // namespace

/// First we normalize calls to strncmp/strcmp to the form of
/// compare(s1, s2, N), which means comparing the first N bytes of s1 and s2
/// (without considering '\0').
///
/// Examples:
///
/// \code
///   strncmp(s, "a", 3) -> compare(s, "a", 2)
///   strncmp(s, "abc", 3) -> compare(s, "abc", 3)
///   strncmp(s, "a\0b", 3) -> compare(s, "a\0b", 2)
///   strcmp(s, "a") -> compare(s, "a", 2)
///
///   char s2[] = {'a'}
///   strncmp(s, s2, 3) -> compare(s, s2, 3)
///
///   char s2[] = {'a', 'b', 'c', 'd'}
///   strncmp(s, s2, 3) -> compare(s, s2, 3)
/// \endcode
///
/// We only handle cases where N and exactly one of s1 and s2 are constant.
/// Cases where s1 and s2 are both constant are already handled by the
/// instcombine pass.
///
/// We do not handle cases where N > StrNCmpInlineThreshold.
///
/// We also do not handle cases where N < 2, which are already
/// handled by the instcombine pass.
///
1687bool StrNCmpInliner::optimizeStrNCmp() {
1688 if (StrNCmpInlineThreshold < 2)
1689 return false;
1690
1692 return false;
1693
1694 Value *Str1P = CI->getArgOperand(0);
1695 Value *Str2P = CI->getArgOperand(1);
1696 // Should be handled elsewhere.
1697 if (Str1P == Str2P)
1698 return false;
1699
1700 StringRef Str1, Str2;
1701 bool HasStr1 = getConstantStringInfo(Str1P, Str1, /*TrimAtNul=*/false);
1702 bool HasStr2 = getConstantStringInfo(Str2P, Str2, /*TrimAtNul=*/false);
1703 if (HasStr1 == HasStr2)
1704 return false;
1705
1706 // Note that '\0' and characters after it are not trimmed.
1707 StringRef Str = HasStr1 ? Str1 : Str2;
1708 Value *StrP = HasStr1 ? Str2P : Str1P;
1709
1710 size_t Idx = Str.find('\0');
1711 uint64_t N = Idx == StringRef::npos ? UINT64_MAX : Idx + 1;
1712 if (Func == LibFunc_strncmp) {
1713 if (auto *ConstInt = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
1714 N = std::min(N, ConstInt->getZExtValue());
1715 else
1716 return false;
1717 }
1718 // N is now the maximum number of bytes we need to compare.
1719 if (N > Str.size() || N < 2 || N > StrNCmpInlineThreshold)
1720 return false;
1721
1722 // Cases where StrP has two or more dereferenceable bytes might be better
1723 // optimized elsewhere.
1724 bool CanBeNull = false, CanBeFreed = false;
1725 if (StrP->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed) > 1)
1726 return false;
1727 inlineCompare(StrP, Str, N, HasStr1);
1728 return true;
1729}
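// Worked example (hypothetical call, not from the source): for
// strncmp(s, "ab\0c", 3), the untrimmed constant is "ab\0c" and the first
// '\0' sits at index 2, so N starts as 3; taking the min with the constant
// length argument keeps N = 3, which passes the N >= 2 and threshold checks,
// and the first three bytes are compared inline.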
1730
1731/// Convert
1732///
1733/// \code
1734/// ret = compare(s1, s2, N)
1735/// \endcode
1736///
1737/// into
1738///
1739/// \code
1740/// ret = (int)s1[0] - (int)s2[0]
1741/// if (ret != 0)
1742/// goto NE
1743/// ...
1744/// ret = (int)s1[N-2] - (int)s2[N-2]
1745/// if (ret != 0)
1746/// goto NE
1747/// ret = (int)s1[N-1] - (int)s2[N-1]
1748/// NE:
1749/// \endcode
1750///
1751/// CFG before and after the transformation:
1752///
1753/// (before)
1754/// BBCI
1755///
1756/// (after)
1757/// BBCI -> BBSubs[0] (sub,icmp) --NE-> BBNE -> BBTail
1758///             |                         ^
1759///             E                         |
1760///             |                         |
1761///         BBSubs[1] (sub,icmp) --NE-----+
1762///          ...                          |
1763///         BBSubs[N-1] (sub) ------------+
1764///
1765void StrNCmpInliner::inlineCompare(Value *LHS, StringRef RHS, uint64_t N,
1766 bool Swapped) {
1767 auto &Ctx = CI->getContext();
1768 IRBuilder<> B(Ctx);
1769 // We want these instructions to be recognized as inlined instructions for the
1770 // compare call, but we don't have a source location for the definition of
1771 // that function, since we're generating that code now. Because the generated
1772 // code is a viable point for a memory access error, we make the pragmatic
1773 // choice here to directly use CI's location so that we have useful
1774 // attribution for the generated code.
1775 B.SetCurrentDebugLocation(CI->getDebugLoc());
1776
1777 BasicBlock *BBCI = CI->getParent();
1778 BasicBlock *BBTail =
1779 SplitBlock(BBCI, CI, DTU, nullptr, nullptr, BBCI->getName() + ".tail");
1780
1781 SmallVector<BasicBlock *> BBSubs;
1782 for (uint64_t I = 0; I < N; ++I)
1783 BBSubs.push_back(
1784 BasicBlock::Create(Ctx, "sub_" + Twine(I), BBCI->getParent(), BBTail));
1785 BasicBlock *BBNE = BasicBlock::Create(Ctx, "ne", BBCI->getParent(), BBTail);
1786
1787 cast<UncondBrInst>(BBCI->getTerminator())->setSuccessor(BBSubs[0]);
1788
1789 B.SetInsertPoint(BBNE);
1790 PHINode *Phi = B.CreatePHI(CI->getType(), N);
1791 B.CreateBr(BBTail);
1792
1793 Value *Base = LHS;
1794 for (uint64_t i = 0; i < N; ++i) {
1795 B.SetInsertPoint(BBSubs[i]);
1796 Value *VL =
1797 B.CreateZExt(B.CreateLoad(B.getInt8Ty(),
1798 B.CreateInBoundsPtrAdd(Base, B.getInt64(i))),
1799 CI->getType());
1800 Value *VR =
1801 ConstantInt::get(CI->getType(), static_cast<unsigned char>(RHS[i]));
1802 Value *Sub = Swapped ? B.CreateSub(VR, VL) : B.CreateSub(VL, VR);
1803 if (i < N - 1) {
1804 CondBrInst *CondBrInst = B.CreateCondBr(
1805 B.CreateICmpNE(Sub, ConstantInt::get(CI->getType(), 0)), BBNE,
1806 BBSubs[i + 1]);
1807
1808 Function *F = CI->getFunction();
1809 assert(F && "Instruction does not belong to a function!");
1810 std::optional<Function::ProfileCount> EC = F->getEntryCount();
1811 if (EC && EC->getCount() > 0)
1812 setExplicitlyUnknownBranchWeights(*CondBrInst, DEBUG_TYPE);
1813 } else {
1814 B.CreateBr(BBNE);
1815 }
1816
1817 Phi->addIncoming(Sub, BBSubs[i]);
1818 }
1819
1820 CI->replaceAllUsesWith(Phi);
1821 CI->eraseFromParent();
1822
1823 if (DTU) {
1824 SmallVector<DominatorTree::UpdateType, 8> Updates;
1825 Updates.push_back({DominatorTree::Insert, BBCI, BBSubs[0]});
1826 for (uint64_t i = 0; i < N; ++i) {
1827 if (i < N - 1)
1828 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBSubs[i + 1]});
1829 Updates.push_back({DominatorTree::Insert, BBSubs[i], BBNE});
1830 }
1831 Updates.push_back({DominatorTree::Insert, BBNE, BBTail});
1832 Updates.push_back({DominatorTree::Delete, BBCI, BBTail});
1833 DTU->applyUpdates(Updates);
1834 }
1835}
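// A minimal sketch of the expansion, assuming Swapped is false and a
// hypothetical call compare(s, "ab", 2); value names are illustrative only:
//
// \code
// sub_0:
//   %v0 = zext i8 s[0] to i32
//   %d0 = sub i32 %v0, 97            ; 'a'
//   %c0 = icmp ne i32 %d0, 0
//   br i1 %c0, label %ne, label %sub_1
// sub_1:
//   %v1 = zext i8 s[1] to i32
//   %d1 = sub i32 %v1, 98            ; 'b'
//   br label %ne
// ne:
//   %ret = phi i32 [ %d0, %sub_0 ], [ %d1, %sub_1 ]
// \endcode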
1836
1837/// Convert memchr with a small constant string into a switch.
1838static bool foldMemChr(CallInst *Call, DomTreeUpdater *DTU,
1839 const DataLayout &DL) {
1840 if (isa<Constant>(Call->getArgOperand(1)))
1841 return false;
1842
1843 StringRef Str;
1844 Value *Base = Call->getArgOperand(0);
1845 if (!getConstantStringInfo(Base, Str, /*TrimAtNul=*/false))
1846 return false;
1847
1848 uint64_t N = Str.size();
1849 if (auto *ConstInt = dyn_cast<ConstantInt>(Call->getArgOperand(2))) {
1850 uint64_t Val = ConstInt->getZExtValue();
1851 // Bail if n is larger than the size of the string.
1852 if (Val > N)
1853 return false;
1854 N = Val;
1855 } else
1856 return false;
1857
1858 if (N > MemChrInlineThreshold)
1859 return false;
1860
1861 BasicBlock *BB = Call->getParent();
1862 BasicBlock *BBNext = SplitBlock(BB, Call, DTU);
1863 IRBuilder<> IRB(BB);
1864 IRB.SetCurrentDebugLocation(Call->getDebugLoc());
1865 IntegerType *ByteTy = IRB.getInt8Ty();
1866 BB->getTerminator()->eraseFromParent();
1867 SwitchInst *SI = IRB.CreateSwitch(
1868 IRB.CreateTrunc(Call->getArgOperand(1), ByteTy), BBNext, N);
1869 // We can't know the precise weights here, as they would depend on the value
1870 // distribution of Call->getArgOperand(1). So we just mark it as "unknown".
1871 setExplicitlyUnknownBranchWeights(*SI, DEBUG_TYPE);
1872 Type *IndexTy = DL.getIndexType(Call->getType());
1873 SmallVector<DominatorTree::UpdateType, 8> Updates;
1874 
1875 BasicBlock *BBSuccess = BasicBlock::Create(
1876 Call->getContext(), "memchr.success", BB->getParent(), BBNext);
1877 IRB.SetInsertPoint(BBSuccess);
1878 PHINode *IndexPHI = IRB.CreatePHI(IndexTy, N, "memchr.idx");
1879 Value *FirstOccursLocation = IRB.CreateInBoundsPtrAdd(Base, IndexPHI);
1880 IRB.CreateBr(BBNext);
1881 if (DTU)
1882 Updates.push_back({DominatorTree::Insert, BBSuccess, BBNext});
1883
1884 SmallPtrSet<ConstantInt *, 4> Cases;
1885 for (uint64_t I = 0; I < N; ++I) {
1886 ConstantInt *CaseVal =
1887 ConstantInt::get(ByteTy, static_cast<unsigned char>(Str[I]));
1888 if (!Cases.insert(CaseVal).second)
1889 continue;
1890
1891 BasicBlock *BBCase = BasicBlock::Create(Call->getContext(), "memchr.case",
1892 BB->getParent(), BBSuccess);
1893 SI->addCase(CaseVal, BBCase);
1894 IRB.SetInsertPoint(BBCase);
1895 IndexPHI->addIncoming(ConstantInt::get(IndexTy, I), BBCase);
1896 IRB.CreateBr(BBSuccess);
1897 if (DTU) {
1898 Updates.push_back({DominatorTree::Insert, BB, BBCase});
1899 Updates.push_back({DominatorTree::Insert, BBCase, BBSuccess});
1900 }
1901 }
1902
1903 PHINode *PHI =
1904 PHINode::Create(Call->getType(), 2, Call->getName(), BBNext->begin());
1905 PHI->addIncoming(Constant::getNullValue(Call->getType()), BB);
1906 PHI->addIncoming(FirstOccursLocation, BBSuccess);
1907
1908 Call->replaceAllUsesWith(PHI);
1909 Call->eraseFromParent();
1910
1911 if (DTU)
1912 DTU->applyUpdates(Updates);
1913
1914 return true;
1915}
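// A minimal sketch of the rewrite, assuming a hypothetical call
// memchr(p, %c, 2) where p points at the constant string "ab":
//
// \code
//   switch i8 %c.trunc, label %tail [
//     i8 97, label %case.a   ; index 0 feeds memchr.idx
//     i8 98, label %case.b   ; index 1 feeds memchr.idx
//   ]
// \endcode
//
// Each case block feeds its byte index into the memchr.idx phi in
// memchr.success, and the final phi in the tail block selects between null
// (no match) and the pointer p + memchr.idx.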
1916
1917static bool foldLibCalls(Instruction &I, TargetTransformInfo &TTI,
1918 TargetLibraryInfo &TLI, AssumptionCache &AC,
1919 DominatorTree &DT, const DataLayout &DL,
1920 bool &MadeCFGChange) {
1921
1922 auto *CI = dyn_cast<CallInst>(&I);
1923 if (!CI || CI->isNoBuiltin())
1924 return false;
1925
1926 Function *CalledFunc = CI->getCalledFunction();
1927 if (!CalledFunc)
1928 return false;
1929
1930 LibFunc LF;
1931 if (!TLI.getLibFunc(*CalledFunc, LF) ||
1932 !isLibFuncEmittable(CI->getModule(), &TLI, LF))
1933 return false;
1934
1935 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Lazy);
1936
1937 switch (LF) {
1938 case LibFunc_sqrt:
1939 case LibFunc_sqrtf:
1940 case LibFunc_sqrtl:
1941 return foldSqrt(CI, LF, TTI, TLI, AC, DT);
1942 case LibFunc_strcmp:
1943 case LibFunc_strncmp:
1944 if (StrNCmpInliner(CI, LF, &DTU, DL).optimizeStrNCmp()) {
1945 MadeCFGChange = true;
1946 return true;
1947 }
1948 break;
1949 case LibFunc_memchr:
1950 if (foldMemChr(CI, &DTU, DL)) {
1951 MadeCFGChange = true;
1952 return true;
1953 }
1954 break;
1955 default:;
1956 }
1957 return false;
1958}
1959
1960/// Match high part of long multiplication.
1961///
1962/// Considering a multiply made up of high and low parts, we can split the
1963/// multiply into:
1964/// x * y == (xh*T + xl) * (yh*T + yl)
1965/// where xh == x>>32 and xl == x & 0xffffffff. T = 2^32.
1966/// This expands to
1967/// xh*yh*T*T + xh*yl*T + xl*yh*T + xl*yl
1968/// which can be drawn as
1969/// [ xh*yh ]
1970/// [ xh*yl ]
1971/// [ xl*yh ]
1972/// [ xl*yl ]
1973/// We are looking for the "high" half, which is xh*yh + xh*yl>>32 + xl*yh>>32 +
1974/// some carries. The carry makes this difficult and there are multiple ways
1975/// of representing it. The ones we attempt to support here are:
1976/// Carry: xh*yh + carry + lowsum>>32
1977/// carry = lowsum < xh*yl ? 0x100000000 : 0
1978/// lowsum = xh*yl + xl*yh + (xl*yl>>32)
1979/// Ladder: xh*yh + c2>>32 + c3>>32
1980/// c2 = xh*yl + (xl*yl>>32); c3 = c2&0xffffffff + xl*yh
1981/// or c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32); c3 = xl*yh
1982/// Carry4: xh*yh + carry + crosssum>>32 + (xl*yl>>32 + crosssum&0xffffffff) >> 32
1983/// crosssum = xh*yl + xl*yh
1984/// carry = crosssum < xh*yl ? 0x100000000 : 0
1985/// Ladder4: xh*yh + (xl*yh)>>32 + (xh*yl)>>32 + low>>32;
1986/// low = (xl*yl)>>32 + (xl*yh)&0xffffffff + (xh*yl)&0xffffffff
1987///
1988/// They all start by matching xh*yh + 2 or 3 other operands. The bottom of the
1989/// tree is xh*yh, xh*yl, xl*yh and xl*yl.
1990static bool foldMulHigh(Instruction &I) {
1991 Type *Ty = I.getType();
1992 if (!Ty->isIntOrIntVectorTy())
1993 return false;
1994
1995 unsigned BitWidth = Ty->getScalarSizeInBits();
1996 APInt LowMask = APInt::getLowBitsSet(BitWidth, BitWidth / 2);
1997 if (BitWidth % 2 != 0)
1998 return false;
1999
2000 auto CreateMulHigh = [&](Value *X, Value *Y) {
2001 IRBuilder<> Builder(&I);
2002 Type *NTy = Ty->getWithNewBitWidth(BitWidth * 2);
2003 Value *XExt = Builder.CreateZExt(X, NTy);
2004 Value *YExt = Builder.CreateZExt(Y, NTy);
2005 Value *Mul = Builder.CreateMul(XExt, YExt, "", /*HasNUW=*/true);
2006 Value *High = Builder.CreateLShr(Mul, BitWidth);
2007 Value *Res = Builder.CreateTrunc(High, Ty, "", /*HasNUW=*/true);
2008 Res->takeName(&I);
2009 I.replaceAllUsesWith(Res);
2010 LLVM_DEBUG(dbgs() << "Created long multiply from parts of " << *X << " and "
2011 << *Y << "\n");
2012 return true;
2013 };
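// For illustration (not emitted verbatim anywhere), with 64-bit scalars
// CreateMulHigh above produces roughly:
//
// \code
//   %xext = zext i64 %x to i128
//   %yext = zext i64 %y to i128
//   %mul  = mul nuw i128 %xext, %yext
//   %high = lshr i128 %mul, 64
//   %res  = trunc nuw i128 %high to i64
// \endcode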
2014
2015 // Common check routines for X_lo*Y_lo and X_hi*Y_lo
2016 auto CheckLoLo = [&](Value *XlYl, Value *X, Value *Y) {
2017 return match(XlYl, m_c_Mul(m_And(m_Specific(X), m_SpecificInt(LowMask)),
2018 m_And(m_Specific(Y), m_SpecificInt(LowMask))));
2019 };
2020 auto CheckHiLo = [&](Value *XhYl, Value *X, Value *Y) {
2021 return match(XhYl,
2022 m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(BitWidth / 2)),
2023 m_And(m_Specific(Y), m_SpecificInt(LowMask))));
2024 };
2025
2026 auto FoldMulHighCarry = [&](Value *X, Value *Y, Instruction *Carry,
2027 Instruction *B) {
2028 // Looking for LowSum >> 32 and carry (select)
2029 if (Carry->getOpcode() != Instruction::Select)
2030 std::swap(Carry, B);
2031
2032 // Carry = LowSum < XhYl ? 0x100000000 : 0
2033 Value *LowSum, *XhYl;
2034 if (!match(Carry,
2035 m_OneUse(m_Select(
2036 m_OneUse(m_SpecificICmp(ICmpInst::ICMP_ULT, m_Value(LowSum),
2037 m_Value(XhYl))),
2038 m_SpecificInt(APInt::getOneBitSet(BitWidth, BitWidth / 2)),
2039 m_Zero()))))
2040 return false;
2041
2042 // XhYl can be Xh*Yl or Xl*Yh
2043 if (!CheckHiLo(XhYl, X, Y)) {
2044 if (CheckHiLo(XhYl, Y, X))
2045 std::swap(X, Y);
2046 else
2047 return false;
2048 }
2049 if (XhYl->hasNUsesOrMore(3))
2050 return false;
2051
2052 // B = LowSum >> 32
2053 if (!match(B, m_OneUse(m_LShr(m_Specific(LowSum),
2054 m_SpecificInt(BitWidth / 2)))) ||
2055 LowSum->hasNUsesOrMore(3))
2056 return false;
2057
2058 // LowSum = XhYl + XlYh + XlYl>>32
2059 Value *XlYh, *XlYl;
2060 auto XlYlHi = m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2));
2061 if (!match(LowSum,
2062 m_c_Add(m_Specific(XhYl),
2063 m_OneUse(m_c_Add(m_OneUse(m_Value(XlYh)), XlYlHi)))) &&
2064 !match(LowSum, m_c_Add(m_OneUse(m_Value(XlYh)),
2065 m_OneUse(m_c_Add(m_Specific(XhYl), XlYlHi)))) &&
2066 !match(LowSum,
2067 m_c_Add(XlYlHi, m_OneUse(m_c_Add(m_Specific(XhYl),
2068 m_OneUse(m_Value(XlYh)))))))
2069 return false;
2070
2071 // Check XlYl and XlYh
2072 if (!CheckLoLo(XlYl, X, Y))
2073 return false;
2074 if (!CheckHiLo(XlYh, Y, X))
2075 return false;
2076
2077 return CreateMulHigh(X, Y);
2078 };
2079
2080 auto FoldMulHighLadder = [&](Value *X, Value *Y, Instruction *A,
2081 Instruction *B) {
2082 // xh*yh + c2>>32 + c3>>32
2083 // c2 = xh*yl + (xl*yl>>32); c3 = c2&0xffffffff + xl*yh
2084 // or c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32); c3 = xh*yl
2085 Value *XlYh, *XhYl, *XlYl, *C2, *C3;
2086 // Strip off the two expected shifts.
2087 if (!match(A, m_LShr(m_Value(C2), m_SpecificInt(BitWidth / 2))) ||
2088 !match(B, m_LShr(m_Value(C3), m_SpecificInt(BitWidth / 2))))
2089 return false;
2090
2091 if (match(C3, m_c_Add(m_Add(m_Value(), m_Value()), m_Value())))
2092 std::swap(C2, C3);
2093 // Try to match c2 = (xl*yh&0xffffffff) + xh*yl + (xl*yl>>32)
2094 if (match(C2,
2096 m_Value(XlYh)),
2097 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)))) ||
2099 m_LShr(m_Value(XlYl),
2100 m_SpecificInt(BitWidth / 2))),
2101 m_Value(XlYh))) ||
2103 m_SpecificInt(BitWidth / 2)),
2104 m_Value(XlYh)),
2105 m_And(m_Specific(C3), m_SpecificInt(LowMask))))) {
2106 XhYl = C3;
2107 } else {
2108 // Match c3 = c2&0xffffffff + xl*yh
2109 if (!match(C3, m_c_Add(m_And(m_Specific(C2), m_SpecificInt(LowMask)),
2110 m_Value(XlYh))))
2111 std::swap(C2, C3);
2112 if (!match(C3, m_c_Add(m_OneUse(
2113 m_And(m_Specific(C2), m_SpecificInt(LowMask))),
2114 m_Value(XlYh))) ||
2115 !C3->hasOneUse() || C2->hasNUsesOrMore(3))
2116 return false;
2117
2118 // Match c2 = xh*yl + (xl*yl >> 32)
2119 if (!match(C2, m_c_Add(m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2)),
2120 m_Value(XhYl))))
2121 return false;
2122 }
2123
2124 // Match XhYl and XlYh - they can appear either way around.
2125 if (!CheckHiLo(XlYh, Y, X))
2126 std::swap(XlYh, XhYl);
2127 if (!CheckHiLo(XlYh, Y, X))
2128 return false;
2129 if (!CheckHiLo(XhYl, X, Y))
2130 return false;
2131 if (!CheckLoLo(XlYl, X, Y))
2132 return false;
2133
2134 return CreateMulHigh(X, Y);
2135 };
2136
2137 auto FoldMulHighLadder4 = [&](Value *X, Value *Y, Instruction *A,
2138 Instruction *B, Instruction *C) {
2139 /// Ladder4: xh*yh + (xl*yh)>>32 + (xh*yl)>>32 + low>>32;
2140 /// low = (xl*yl)>>32 + (xl*yh)&0xffffffff + (xh*yl)&0xffffffff
2141
2142 // Find A = Low >> 32 and B/C = XhYl>>32, XlYh>>32.
2143 auto ShiftAdd =
2144 m_OneUse(m_LShr(m_OneUse(m_c_Add(m_Value(), m_Value())), m_SpecificInt(BitWidth / 2)));
2145 if (!match(A, ShiftAdd))
2146 std::swap(A, B);
2147 if (!match(A, ShiftAdd))
2148 std::swap(A, C);
2149 Value *Low;
2150 if (!match(A, m_LShr(m_Value(Low), m_SpecificInt(BitWidth / 2))))
2151 return false;
2152
2153 // Match B == XhYl>>32 and C == XlYh>>32
2154 Value *XhYl, *XlYh;
2155 if (!match(B, m_LShr(m_Value(XhYl), m_SpecificInt(BitWidth / 2))) ||
2156 !match(C, m_LShr(m_Value(XlYh), m_SpecificInt(BitWidth / 2))))
2157 return false;
2158 if (!CheckHiLo(XhYl, X, Y))
2159 std::swap(XhYl, XlYh);
2160 if (!CheckHiLo(XhYl, X, Y) || XhYl->hasNUsesOrMore(3))
2161 return false;
2162 if (!CheckHiLo(XlYh, Y, X) || XlYh->hasNUsesOrMore(3))
2163 return false;
2164
2165 // Match Low as XlYl>>32 + XhYl&0xffffffff + XlYh&0xffffffff
2166 Value *XlYl;
2167 if (!match(
2168 Low,
2169 m_c_Add(
2170 m_OneUse(m_c_Add(
2171 m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))),
2172 m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))))),
2173 m_OneUse(
2174 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))) &&
2175 !match(
2176 Low,
2177 m_c_Add(
2178 m_OneUse(m_c_Add(
2179 m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))),
2180 m_OneUse(
2181 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))),
2182 m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))))) &&
2183 !match(
2184 Low,
2185 m_c_Add(
2186 m_OneUse(m_c_Add(
2187 m_OneUse(m_And(m_Specific(XlYh), m_SpecificInt(LowMask))),
2188 m_OneUse(
2189 m_LShr(m_Value(XlYl), m_SpecificInt(BitWidth / 2))))),
2190 m_OneUse(m_And(m_Specific(XhYl), m_SpecificInt(LowMask))))))
2191 return false;
2192 if (!CheckLoLo(XlYl, X, Y))
2193 return false;
2194
2195 return CreateMulHigh(X, Y);
2196 };
2197
2198 auto FoldMulHighCarry4 = [&](Value *X, Value *Y, Instruction *Carry,
2199 Instruction *B, Instruction *C) {
2200 // xh*yh + carry + crosssum>>32 + (xl*yl>>32 + crosssum&0xffffffff) >> 32
2201 // crosssum = xh*yl + xl*yh
2202 // carry = crosssum < xh*yl ? 0x100000000 : 0
2203 if (Carry->getOpcode() != Instruction::Select)
2204 std::swap(Carry, B);
2205 if (Carry->getOpcode() != Instruction::Select)
2206 std::swap(Carry, C);
2207
2208 // Carry = CrossSum < XhYl ? 0x100000000 : 0
2209 Value *CrossSum, *XhYl;
2210 if (!match(Carry,
2211 m_OneUse(m_Select(
2212 m_OneUse(m_SpecificICmp(ICmpInst::ICMP_ULT,
2213 m_Value(CrossSum), m_Value(XhYl))),
2214 m_SpecificInt(APInt::getOneBitSet(BitWidth, BitWidth / 2)),
2215 m_Zero()))))
2216 return false;
2217
2218 if (!match(B, m_LShr(m_Specific(CrossSum), m_SpecificInt(BitWidth / 2))))
2219 std::swap(B, C);
2220 if (!match(B, m_LShr(m_Specific(CrossSum), m_SpecificInt(BitWidth / 2))))
2221 return false;
2222
2223 Value *XlYl, *LowAccum;
2224 if (!match(C, m_LShr(m_Value(LowAccum), m_SpecificInt(BitWidth / 2))) ||
2225 !match(LowAccum, m_c_Add(m_OneUse(m_LShr(m_Value(XlYl),
2226 m_SpecificInt(BitWidth / 2))),
2227 m_OneUse(m_And(m_Specific(CrossSum),
2228 m_SpecificInt(LowMask))))) ||
2229 LowAccum->hasNUsesOrMore(3))
2230 return false;
2231 if (!CheckLoLo(XlYl, X, Y))
2232 return false;
2233
2234 if (!CheckHiLo(XhYl, X, Y))
2235 std::swap(X, Y);
2236 if (!CheckHiLo(XhYl, X, Y))
2237 return false;
2238 Value *XlYh;
2239 if (!match(CrossSum, m_c_Add(m_Specific(XhYl), m_OneUse(m_Value(XlYh)))) ||
2240 !CheckHiLo(XlYh, Y, X) || CrossSum->hasNUsesOrMore(4) ||
2241 XhYl->hasNUsesOrMore(3))
2242 return false;
2243
2244 return CreateMulHigh(X, Y);
2245 };
2246
2247 // X and Y are the two inputs, A, B and C are other parts of the pattern
2248 // (crosssum>>32, carry, etc).
2249 Value *X, *Y;
2250 Instruction *A, *B, *C;
2251 auto HiHi = m_OneUse(m_Mul(m_LShr(m_Value(X), m_SpecificInt(BitWidth / 2)),
2252 m_LShr(m_Value(Y), m_SpecificInt(BitWidth / 2))));
2253 if ((match(&I, m_c_Add(HiHi, m_OneUse(m_Add(m_Instruction(A),
2254 m_Instruction(B))))) ||
2255 match(&I, m_c_Add(m_Instruction(A),
2256 m_OneUse(m_c_Add(HiHi, m_Instruction(B)))))) &&
2257 A->hasOneUse() && B->hasOneUse())
2258 if (FoldMulHighCarry(X, Y, A, B) || FoldMulHighLadder(X, Y, A, B))
2259 return true;
2260
2261 if ((match(&I, m_c_Add(HiHi, m_OneUse(m_c_Add(
2264 m_Instruction(C))))))) ||
2268 m_Instruction(C))))))) ||
2272 m_OneUse(m_c_Add(HiHi, m_Instruction(C))))))) ||
2273 match(&I,
2276 A->hasOneUse() && B->hasOneUse() && C->hasOneUse())
2277 return FoldMulHighCarry4(X, Y, A, B, C) ||
2278 FoldMulHighLadder4(X, Y, A, B, C);
2279
2280 return false;
2281}
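// As a concrete illustration (hand-written, not taken from the source), the
// "Carry" shape matched above corresponds to C code along these lines:
//
// \code
//   #include <stdint.h>
//   uint64_t mulhi(uint64_t x, uint64_t y) {
//     uint64_t xl = x & 0xffffffff, xh = x >> 32;
//     uint64_t yl = y & 0xffffffff, yh = y >> 32;
//     uint64_t lowsum = xh * yl + xl * yh + ((xl * yl) >> 32);
//     uint64_t carry = lowsum < xh * yl ? 0x100000000ULL : 0;
//     return xh * yh + carry + (lowsum >> 32);
//   }
// \endcode
//
// which the fold rewrites into a single widened multiply whose high half is
// truncated back to 64 bits.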
2282
2283/// This is the entry point for folds that could be implemented in regular
2284/// InstCombine, but they are separated because they are not expected to
2285/// occur frequently and/or require more than a constant-length pattern match.
2286static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
2287 TargetTransformInfo &TTI, TargetLibraryInfo &TLI,
2288 AliasAnalysis &AA,
2289 AssumptionCache &AC, bool &MadeCFGChange) {
2290 bool MadeChange = false;
2291 for (BasicBlock &BB : F) {
2292 // Ignore unreachable basic blocks.
2293 if (!DT.isReachableFromEntry(&BB))
2294 continue;
2295
2296 const DataLayout &DL = F.getDataLayout();
2297
2298 // Walk the block backwards for efficiency. We're matching a chain of
2299 // use->defs, so we're more likely to succeed by starting from the bottom.
2300 // Also, we want to avoid matching partial patterns.
2301 // TODO: It would be more efficient if we removed dead instructions
2302 // iteratively in this loop rather than waiting until the end.
2303 for (Instruction &I : make_early_inc_range(reverse(BB))) {
2304 MadeChange |= foldAnyOrAllBitsSet(I);
2305 MadeChange |= foldGuardedFunnelShift(I, DT);
2306 MadeChange |= foldSelectSplitCTTZ(I);
2307 MadeChange |= foldSelectSplitCTLZ(I);
2308 MadeChange |= tryToRecognizePopCount(I);
2309 MadeChange |= tryToRecognizePopCount2n3(I);
2310 MadeChange |= tryToFPToSat(I, TTI);
2311 MadeChange |= tryToRecognizeTableBasedCttz(I, DL);
2312 MadeChange |= tryToRecognizeTableBasedLog2(I, DL, TTI);
2313 MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT);
2314 MadeChange |= foldPatternedLoads(I, DL);
2315 MadeChange |= foldICmpOrChain(I, DL, TTI, AA, DT);
2316 MadeChange |= foldMulHigh(I);
2317 // NOTE: This function may erase the instruction `I`, so it needs to
2318 // be called at the end of this sequence; otherwise we may introduce
2319 // bugs.
2320 MadeChange |= foldLibCalls(I, TTI, TLI, AC, DT, DL, MadeCFGChange);
2321 }
2322
2323 // Do this separately to avoid redundantly scanning stores multiple times.
2324 MadeChange |= foldConsecutiveStores(BB, DL, TTI, AA);
2325 }
2326
2327 // We're done with transforms, so remove dead instructions.
2328 if (MadeChange)
2329 for (BasicBlock &BB : F)
2330 SimplifyInstructionsInBlock(&BB);
2331 
2332 return MadeChange;
2333}
2334
2335/// This is the entry point for all transforms. Pass manager differences are
2336/// handled in the callers of this function.
2337static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI,
2338 TargetLibraryInfo &TLI, DominatorTree &DT,
2339 AliasAnalysis &AA, bool &MadeCFGChange) {
2340 bool MadeChange = false;
2341 const DataLayout &DL = F.getDataLayout();
2342 TruncInstCombine TIC(AC, TLI, DL, DT);
2343 MadeChange |= TIC.run(F);
2344 MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI, AA, AC, MadeCFGChange);
2345 return MadeChange;
2346}
2347
2348PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
2349 FunctionAnalysisManager &AM) {
2350 auto &AC = AM.getResult<AssumptionAnalysis>(F);
2351 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2352 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2353 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
2354 auto &AA = AM.getResult<AAManager>(F);
2355 bool MadeCFGChange = false;
2356 if (!runImpl(F, AC, TTI, TLI, DT, AA, MadeCFGChange)) {
2357 // No changes, all analyses are preserved.
2358 return PreservedAnalyses::all();
2359 }
2360 // Mark all the analyses that instcombine updates as preserved.
2361 PreservedAnalyses PA;
2362 if (MadeCFGChange)
2363 PA.preserve<DominatorTreeAnalysis>();
2364 else
2365 PA.preserveSet<CFGAnalyses>();
2366 return PA;
2367}