Bug Summary

File: llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
Warning: line 230, column 10
2nd function call argument is an uninitialized value

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InstCombineShifts.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Transforms/InstCombine -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Transforms/InstCombine -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp

1//===- InstCombineShifts.cpp ----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitShl, visitLShr, and visitAShr functions.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/Analysis/ConstantFolding.h"
15#include "llvm/Analysis/InstructionSimplify.h"
16#include "llvm/IR/IntrinsicInst.h"
17#include "llvm/IR/PatternMatch.h"
18#include "llvm/Transforms/InstCombine/InstCombiner.h"
19using namespace llvm;
20using namespace PatternMatch;
21
22#define DEBUG_TYPE "instcombine"
23
24bool canTryToConstantAddTwoShiftAmounts(Value *Sh0, Value *ShAmt0, Value *Sh1,
25 Value *ShAmt1) {
26 // We have two shift amounts from two different shifts. The types of those
27 // shift amounts may not match. If that's the case, let's bail out now.
28 if (ShAmt0->getType() != ShAmt1->getType())
29 return false;
30
31 // As input, we have the following pattern:
32 // Sh0 (Sh1 X, Q), K
33 // We want to rewrite that as:
34 // Sh x, (Q+K) iff (Q+K) u< bitwidth(x)
35 // While we know that originally (Q+K) would not overflow
36 // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
37 // shift amounts, so it may now overflow in a smaller bitwidth.
38 // To ensure that does not happen, we need to ensure that the total maximal
39 // shift amount is still representable in that smaller bit width.
40 unsigned MaximalPossibleTotalShiftAmount =
41 (Sh0->getType()->getScalarSizeInBits() - 1) +
42 (Sh1->getType()->getScalarSizeInBits() - 1);
43 APInt MaximalRepresentableShiftAmount =
44 APInt::getAllOnesValue(ShAmt0->getType()->getScalarSizeInBits());
45 return MaximalRepresentableShiftAmount.uge(MaximalPossibleTotalShiftAmount);
46}
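
To make the guard concrete, here is a hypothetical IR instance (value names invented, not taken from the analyzed build): both shift amounts are i8 values zext'ed to i32 before shifting, so the maximal possible total shift amount is 31 + 31 = 62, while an i8 amount can represent up to 255; 255 u>= 62 holds and the helper returns true.

    %q.wide = zext i8 %q to i32
    %inner = lshr i32 %x, %q.wide       ; Sh1, with ShAmt1 = %q (i8)
    %k.wide = zext i8 %k to i32
    %outer = lshr i32 %inner, %k.wide   ; Sh0, with ShAmt0 = %k (i8)
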
47
48// Given pattern:
49// (x shiftopcode Q) shiftopcode K
50// we should rewrite it as
51 // x shiftopcode (Q+K) iff (Q+K) u< bitwidth(x)
52//
53// This is valid for any shift, but they must be identical, and we must be
54// careful in case we have (zext(Q)+zext(K)) and look past extensions,
55// (Q+K) must not overflow or else (Q+K) u< bitwidth(x) is bogus.
56//
57// AnalyzeForSignBitExtraction indicates that we will only analyze whether this
58 // pattern has any 2 right-shifts that sum to 1 less than the original bit width.
59Value *InstCombinerImpl::reassociateShiftAmtsOfTwoSameDirectionShifts(
60 BinaryOperator *Sh0, const SimplifyQuery &SQ,
61 bool AnalyzeForSignBitExtraction) {
62 // Look for a shift of some instruction, ignore zext of shift amount if any.
63 Instruction *Sh0Op0;
64 Value *ShAmt0;
65 if (!match(Sh0,
66 m_Shift(m_Instruction(Sh0Op0), m_ZExtOrSelf(m_Value(ShAmt0)))))
67 return nullptr;
68
69 // If there is a truncation between the two shifts, we must make note of it
70 // and look through it. The truncation imposes additional constraints on the
71 // transform.
72 Instruction *Sh1;
73 Value *Trunc = nullptr;
74 match(Sh0Op0,
75 m_CombineOr(m_CombineAnd(m_Trunc(m_Instruction(Sh1)), m_Value(Trunc)),
76 m_Instruction(Sh1)));
77
78 // Inner shift: (x shiftopcode ShAmt1)
79 // Like with the other shift, ignore zext of the shift amount if any.
80 Value *X, *ShAmt1;
81 if (!match(Sh1, m_Shift(m_Value(X), m_ZExtOrSelf(m_Value(ShAmt1)))))
82 return nullptr;
83
84 // Verify that it would be safe to try to add those two shift amounts.
85 if (!canTryToConstantAddTwoShiftAmounts(Sh0, ShAmt0, Sh1, ShAmt1))
86 return nullptr;
87
88 // We are only looking for signbit extraction if we have two right shifts.
89 bool HadTwoRightShifts = match(Sh0, m_Shr(m_Value(), m_Value())) &&
90 match(Sh1, m_Shr(m_Value(), m_Value()));
91 // ... and if it's not two right-shifts, we know the answer already.
92 if (AnalyzeForSignBitExtraction && !HadTwoRightShifts)
93 return nullptr;
94
95 // The shift opcodes must be identical, unless we are just checking whether
96 // this pattern can be interpreted as a sign-bit-extraction.
97 Instruction::BinaryOps ShiftOpcode = Sh0->getOpcode();
98 bool IdenticalShOpcodes = Sh0->getOpcode() == Sh1->getOpcode();
99 if (!IdenticalShOpcodes && !AnalyzeForSignBitExtraction)
100 return nullptr;
101
102 // If we saw truncation, we'll need to produce an extra instruction,
103 // and for that one of the operands of the shift must be one-use,
104 // unless of course we don't actually plan to produce any instructions here.
105 if (Trunc && !AnalyzeForSignBitExtraction &&
106 !match(Sh0, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
107 return nullptr;
108
109 // Can we fold (ShAmt0+ShAmt1) ?
110 auto *NewShAmt = dyn_cast_or_null<Constant>(
111 SimplifyAddInst(ShAmt0, ShAmt1, /*isNSW=*/false, /*isNUW=*/false,
112 SQ.getWithInstruction(Sh0)));
113 if (!NewShAmt)
114 return nullptr; // Did not simplify.
115 unsigned NewShAmtBitWidth = NewShAmt->getType()->getScalarSizeInBits();
116 unsigned XBitWidth = X->getType()->getScalarSizeInBits();
117 // Is the new shift amount smaller than the bit width of inner/new shift?
118 if (!match(NewShAmt, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
119 APInt(NewShAmtBitWidth, XBitWidth))))
120 return nullptr; // FIXME: could perform constant-folding.
121
122 // If there was a truncation, and we have a right-shift, we can only fold if
123 // we are left with the original sign bit. Likewise, if we were just checking
124 // that this is a signbit extraction, this is the place to check it.
125 // FIXME: zero shift amount is also legal here, but we can't *easily* check
126 // more than one predicate so it's not really worth it.
127 if (HadTwoRightShifts && (Trunc || AnalyzeForSignBitExtraction)) {
128 // If it's not a sign bit extraction, then we're done.
129 if (!match(NewShAmt,
130 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
131 APInt(NewShAmtBitWidth, XBitWidth - 1))))
132 return nullptr;
133 // If it is, and that was the question, return the base value.
134 if (AnalyzeForSignBitExtraction)
135 return X;
136 }
137
138 assert(IdenticalShOpcodes && "Should not get here with different shifts.");
139
140 // All good, we can do this fold.
141 NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, X->getType());
142
143 BinaryOperator *NewShift = BinaryOperator::Create(ShiftOpcode, X, NewShAmt);
144
145 // The flags can only be propagated if there wasn't a trunc.
146 if (!Trunc) {
147 // If the pattern did not involve trunc, and both of the original shifts
148 // had the same flag set, preserve the flag.
149 if (ShiftOpcode == Instruction::BinaryOps::Shl) {
150 NewShift->setHasNoUnsignedWrap(Sh0->hasNoUnsignedWrap() &&
151 Sh1->hasNoUnsignedWrap());
152 NewShift->setHasNoSignedWrap(Sh0->hasNoSignedWrap() &&
153 Sh1->hasNoSignedWrap());
154 } else {
155 NewShift->setIsExact(Sh0->isExact() && Sh1->isExact());
156 }
157 }
158
159 Instruction *Ret = NewShift;
160 if (Trunc) {
161 Builder.Insert(NewShift);
162 Ret = CastInst::Create(Instruction::Trunc, NewShift, Sh0->getType());
163 }
164
165 return Ret;
166}
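
A sketch of the simplest (no-trunc, no-zext) case, with invented constants: SimplifyAddInst folds 5 + 3 to 8, and 8 u< 32, so the two same-direction shifts become one.

    %t = lshr i32 %x, 5
    %r = lshr i32 %t, 3
    ; becomes:
    %r = lshr i32 %x, 8
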
167
168// If we have some pattern that leaves only some low bits set, and then performs
169// a left-shift of those bits, we can omit the mask if none of the bits that are
170// left after the final shift are modified by the mask.
171//
172// There are many variants to this pattern:
173// a) (x & ((1 << MaskShAmt) - 1)) << ShiftShAmt
174// b) (x & (~(-1 << MaskShAmt))) << ShiftShAmt
175// c) (x & (-1 >> MaskShAmt)) << ShiftShAmt
176// d) (x & ((-1 << MaskShAmt) >> MaskShAmt)) << ShiftShAmt
177// e) ((x << MaskShAmt) l>> MaskShAmt) << ShiftShAmt
178// f) ((x << MaskShAmt) a>> MaskShAmt) << ShiftShAmt
179// All these patterns can be simplified to just:
180// x << ShiftShAmt
181// iff:
182// a,b) (MaskShAmt+ShiftShAmt) u>= bitwidth(x)
183// c,d,e,f) (ShiftShAmt-MaskShAmt) s>= 0 (i.e. ShiftShAmt u>= MaskShAmt)
184static Instruction *
185dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
186 const SimplifyQuery &Q,
187 InstCombiner::BuilderTy &Builder) {
188 assert(OuterShift->getOpcode() == Instruction::BinaryOps::Shl &&
189        "The input must be 'shl'!");

8: Assuming the condition is true
9: '?' condition is true
190
191 Value *Masked, *ShiftShAmt;
10: 'ShiftShAmt' declared without an initial value
192 match(OuterShift,
193 m_Shift(m_Value(Masked), m_ZExtOrSelf(m_Value(ShiftShAmt))));
11: Calling 'm_Value'
15: Returning from 'm_Value'
16: Calling 'm_ZExtOrSelf<llvm::PatternMatch::bind_ty<llvm::Value>>'
24: Returning from 'm_ZExtOrSelf<llvm::PatternMatch::bind_ty<llvm::Value>>'
25: Calling 'm_Shift<llvm::PatternMatch::bind_ty<llvm::Value>, llvm::PatternMatch::match_combine_or<llvm::PatternMatch::CastClass_match<llvm::PatternMatch::bind_ty<llvm::Value>, 39>, llvm::PatternMatch::bind_ty<llvm::Value>>>'
27: Returning from 'm_Shift<llvm::PatternMatch::bind_ty<llvm::Value>, llvm::PatternMatch::match_combine_or<llvm::PatternMatch::CastClass_match<llvm::PatternMatch::bind_ty<llvm::Value>, 39>, llvm::PatternMatch::bind_ty<llvm::Value>>>'
194
195 // *If* there is a truncation between the outer shift and a possible mask,
196 // then said truncation *must* be one-use, else we can't perform the fold.
197 Value *Trunc;
198 if (match(Masked, m_CombineAnd(m_Trunc(m_Value(Masked)), m_Value(Trunc))) &&
28: Taking false branch
199 !Trunc->hasOneUse())
200 return nullptr;
201
202 Type *NarrowestTy = OuterShift->getType();
203 Type *WidestTy = Masked->getType();
204 bool HadTrunc = WidestTy != NarrowestTy;
29: Assuming 'WidestTy' is equal to 'NarrowestTy'
205
206 // The mask must be computed in a type twice as wide to ensure
207 // that no bits are lost if the sum-of-shifts is wider than the base type.
208 Type *ExtendedTy = WidestTy->getExtendedType();
209
210 Value *MaskShAmt;
211
212 // ((1 << MaskShAmt) - 1)
213 auto MaskA = m_Add(m_Shl(m_One(), m_Value(MaskShAmt)), m_AllOnes());
214 // (~(-1 << maskNbits))
215 auto MaskB = m_Xor(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_AllOnes());
216 // (-1 >> MaskShAmt)
217 auto MaskC = m_Shr(m_AllOnes(), m_Value(MaskShAmt));
218 // ((-1 << MaskShAmt) >> MaskShAmt)
219 auto MaskD =
220 m_Shr(m_Shl(m_AllOnes(), m_Value(MaskShAmt)), m_Deferred(MaskShAmt));
221
222 Value *X;
223 Constant *NewMask;
224
225 if (match(Masked, m_c_And(m_CombineOr(MaskA, MaskB), m_Value(X)))) {
30: Calling 'match<llvm::Value, llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::match_combine_or<llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_one, llvm::ConstantInt>, llvm::PatternMatch::bind_ty<llvm::Value>, 25, false>, llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_all_ones, llvm::ConstantInt>, 13, false>, llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_all_ones, llvm::ConstantInt>, llvm::PatternMatch::bind_ty<llvm::Value>, 25, false>, llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_all_ones, llvm::ConstantInt>, 30, false>>, llvm::PatternMatch::bind_ty<llvm::Value>, 28, true>>'
38: Returning from 'match<llvm::Value, llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::match_combine_or<llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_one, llvm::ConstantInt>, llvm::PatternMatch::bind_ty<llvm::Value>, 25, false>, llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_all_ones, llvm::ConstantInt>, 13, false>, llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::BinaryOp_match<llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_all_ones, llvm::ConstantInt>, llvm::PatternMatch::bind_ty<llvm::Value>, 25, false>, llvm::PatternMatch::cstval_pred_ty<llvm::PatternMatch::is_all_ones, llvm::ConstantInt>, 30, false>>, llvm::PatternMatch::bind_ty<llvm::Value>, 28, true>>'
39: Taking true branch
226 // Peek through an optional zext of the shift amount.
227 match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
228
229 // Verify that it would be safe to try to add those two shift amounts.
230 if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
40: 2nd function call argument is an uninitialized value
231 MaskShAmt))
232 return nullptr;
233
234 // Can we simplify (MaskShAmt+ShiftShAmt) ?
235 auto *SumOfShAmts = dyn_cast_or_null<Constant>(SimplifyAddInst(
236 MaskShAmt, ShiftShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
237 if (!SumOfShAmts)
238 return nullptr; // Did not simplify.
239 // In this pattern SumOfShAmts correlates with the number of low bits
240 // that shall remain in the root value (OuterShift).
241
242 // An extend of an undef value becomes zero because the high bits are never
243 // completely unknown. Replace the `undef` shift amounts with the final
244 // shift bitwidth to ensure that the value remains undef when creating the
245 // subsequent shift op.
246 SumOfShAmts = Constant::replaceUndefsWith(
247 SumOfShAmts, ConstantInt::get(SumOfShAmts->getType()->getScalarType(),
248 ExtendedTy->getScalarSizeInBits()));
249 auto *ExtendedSumOfShAmts = ConstantExpr::getZExt(SumOfShAmts, ExtendedTy);
250 // And compute the mask as usual: ~(-1 << (SumOfShAmts))
251 auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
252 auto *ExtendedInvertedMask =
253 ConstantExpr::getShl(ExtendedAllOnes, ExtendedSumOfShAmts);
254 NewMask = ConstantExpr::getNot(ExtendedInvertedMask);
255 } else if (match(Masked, m_c_And(m_CombineOr(MaskC, MaskD), m_Value(X))) ||
256 match(Masked, m_Shr(m_Shl(m_Value(X), m_Value(MaskShAmt)),
257 m_Deferred(MaskShAmt)))) {
258 // Peek through an optional zext of the shift amount.
259 match(MaskShAmt, m_ZExtOrSelf(m_Value(MaskShAmt)));
260
261 // Verify that it would be safe to try to add those two shift amounts.
262 if (!canTryToConstantAddTwoShiftAmounts(OuterShift, ShiftShAmt, Masked,
263 MaskShAmt))
264 return nullptr;
265
266 // Can we simplify (ShiftShAmt-MaskShAmt) ?
267 auto *ShAmtsDiff = dyn_cast_or_null<Constant>(SimplifySubInst(
268 ShiftShAmt, MaskShAmt, /*IsNSW=*/false, /*IsNUW=*/false, Q));
269 if (!ShAmtsDiff)
270 return nullptr; // Did not simplify.
271 // In this pattern ShAmtsDiff correlates with the number of high bits that
272 // shall be unset in the root value (OuterShift).
273
274 // An extend of an undef value becomes zero because the high bits are never
275 // completely unknown. Replace the `undef` shift amounts with the negated
276 // bitwidth of innermost shift to ensure that the value remains undef when
277 // creating the subsequent shift op.
278 unsigned WidestTyBitWidth = WidestTy->getScalarSizeInBits();
279 ShAmtsDiff = Constant::replaceUndefsWith(
280 ShAmtsDiff, ConstantInt::get(ShAmtsDiff->getType()->getScalarType(),
281 -WidestTyBitWidth));
282 auto *ExtendedNumHighBitsToClear = ConstantExpr::getZExt(
283 ConstantExpr::getSub(ConstantInt::get(ShAmtsDiff->getType(),
284 WidestTyBitWidth,
285 /*isSigned=*/false),
286 ShAmtsDiff),
287 ExtendedTy);
288 // And compute the mask as usual: (-1 l>> (NumHighBitsToClear))
289 auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
290 NewMask =
291 ConstantExpr::getLShr(ExtendedAllOnes, ExtendedNumHighBitsToClear);
292 } else
293 return nullptr; // Don't know anything about this pattern.
294
295 NewMask = ConstantExpr::getTrunc(NewMask, NarrowestTy);
296
297 // Does this mask have any unset bits? If not, then we can just not apply it.
298 bool NeedMask = !match(NewMask, m_AllOnes());
299
300 // If we need to apply a mask, there are several more restrictions we have.
301 if (NeedMask) {
302 // The old masking instruction must go away.
303 if (!Masked->hasOneUse())
304 return nullptr;
305 // The original "masking" instruction must not have been `ashr`.
306 if (match(Masked, m_AShr(m_Value(), m_Value())))
307 return nullptr;
308 }
309
310 // If we need to apply truncation, let's do it first, since we can.
311 // We have already ensured that the old truncation will go away.
312 if (HadTrunc)
313 X = Builder.CreateTrunc(X, NarrowestTy);
314
315 // No 'NUW'/'NSW'! We no longer know that we won't shift-out non-0 bits.
316 // We didn't change the Type of this outermost shift, so we can just do it.
317 auto *NewShift = BinaryOperator::Create(OuterShift->getOpcode(), X,
318 OuterShift->getOperand(1));
319 if (!NeedMask)
320 return NewShift;
321
322 Builder.Insert(NewShift);
323 return BinaryOperator::Create(Instruction::And, NewShift, NewMask);
324}
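
A minimal sketch of variant (a), with assumed constants MaskShAmt = 24 and ShiftShAmt = 8 on i32 (in real IR the mask computation would usually be constant-folded already; it is shown unfolded here so MaskShAmt is visible). Since 24 + 8 u>= 32, the mask cannot affect any bit that survives the final shift:

    %bit = shl i32 1, 24
    %mask = add i32 %bit, -1            ; (1 << 24) - 1
    %masked = and i32 %x, %mask
    %r = shl i32 %masked, 8
    ; simplifies to:
    %r = shl i32 %x, 8
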
325
326/// If we have a shift-by-constant of a bitwise logic op that itself has a
327/// shift-by-constant operand with identical opcode, we may be able to convert
328/// that into 2 independent shifts followed by the logic op. This eliminates
329/// a use of an intermediate value (reduces the dependency chain).
330static Instruction *foldShiftOfShiftedLogic(BinaryOperator &I,
331 InstCombiner::BuilderTy &Builder) {
332 assert(I.isShift() && "Expected a shift as input");
333 auto *LogicInst = dyn_cast<BinaryOperator>(I.getOperand(0));
334 if (!LogicInst || !LogicInst->isBitwiseLogicOp() || !LogicInst->hasOneUse())
335 return nullptr;
336
337 Constant *C0, *C1;
338 if (!match(I.getOperand(1), m_Constant(C1)))
339 return nullptr;
340
341 Instruction::BinaryOps ShiftOpcode = I.getOpcode();
342 Type *Ty = I.getType();
343
344 // Find a matching one-use shift by constant. The fold is not valid if the sum
345 // of the shift values equals or exceeds bitwidth.
346 // TODO: Remove the one-use check if the other logic operand (Y) is constant.
347 Value *X, *Y;
348 auto matchFirstShift = [&](Value *V) {
349 BinaryOperator *BO;
350 APInt Threshold(Ty->getScalarSizeInBits(), Ty->getScalarSizeInBits());
351 return match(V, m_BinOp(BO)) && BO->getOpcode() == ShiftOpcode &&
352 match(V, m_OneUse(m_Shift(m_Value(X), m_Constant(C0)))) &&
353 match(ConstantExpr::getAdd(C0, C1),
354 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
355 };
356
357 // Logic ops are commutative, so check each operand for a match.
358 if (matchFirstShift(LogicInst->getOperand(0)))
359 Y = LogicInst->getOperand(1);
360 else if (matchFirstShift(LogicInst->getOperand(1)))
361 Y = LogicInst->getOperand(0);
362 else
363 return nullptr;
364
365 // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1)
366 Constant *ShiftSumC = ConstantExpr::getAdd(C0, C1);
367 Value *NewShift1 = Builder.CreateBinOp(ShiftOpcode, X, ShiftSumC);
368 Value *NewShift2 = Builder.CreateBinOp(ShiftOpcode, Y, I.getOperand(1));
369 return BinaryOperator::Create(LogicInst->getOpcode(), NewShift1, NewShift2);
370}
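
A hypothetical instance with C0 = 2, C1 = 3 and xor as the logic op (2 + 3 u< 32, so the fold is valid; all names invented):

    %s0 = shl i32 %x, 2
    %l = xor i32 %s0, %y
    %r = shl i32 %l, 3
    ; becomes: logic (shift X, C0+C1), (shift Y, C1)
    %ns0 = shl i32 %x, 5
    %ns1 = shl i32 %y, 3
    %r = xor i32 %ns0, %ns1
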
371
372Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
373 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
374 assert(Op0->getType() == Op1->getType());
375
376 // If the shift amount is a one-use `sext`, we can demote it to `zext`.
377 Value *Y;
378 if (match(Op1, m_OneUse(m_SExt(m_Value(Y))))) {
379 Value *NewExt = Builder.CreateZExt(Y, I.getType(), Op1->getName());
380 return BinaryOperator::Create(I.getOpcode(), Op0, NewExt);
381 }
382
383 // See if we can fold away this shift.
384 if (SimplifyDemandedInstructionBits(I))
385 return &I;
386
387 // Try to fold constant and into select arguments.
388 if (isa<Constant>(Op0))
389 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
390 if (Instruction *R = FoldOpIntoSelect(I, SI))
391 return R;
392
393 if (Constant *CUI = dyn_cast<Constant>(Op1))
394 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
395 return Res;
396
397 if (auto *NewShift = cast_or_null<Instruction>(
398 reassociateShiftAmtsOfTwoSameDirectionShifts(&I, SQ)))
399 return NewShift;
400
401 // (C1 shift (A add C2)) -> ((C1 shift C2) shift A)
402 // iff A and C2 are both positive.
403 Value *A;
404 Constant *C;
405 if (match(Op0, m_Constant()) && match(Op1, m_Add(m_Value(A), m_Constant(C))))
406 if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) &&
407 isKnownNonNegative(C, DL, 0, &AC, &I, &DT))
408 return BinaryOperator::Create(
409 I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A);
410
411 // X shift (A srem C) -> X shift (A and (C - 1)) iff C is a power of 2,
412 // because shifts by negative values (which could occur if A were negative)
413 // are undefined.
414 if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Constant(C))) &&
415 match(C, m_Power2())) {
416 // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
417 // demand the sign bit (and many others) here??
418 Constant *Mask = ConstantExpr::getSub(C, ConstantInt::get(I.getType(), 1));
419 Value *Rem = Builder.CreateAnd(A, Mask, Op1->getName());
420 return replaceOperand(I, 1, Rem);
421 }
422
423 if (Instruction *Logic = foldShiftOfShiftedLogic(I, Builder))
424 return Logic;
425
426 return nullptr;
427}
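
For the srem fold above, a sketch with an assumed power-of-two C = 8: a negative remainder would make the shift poison anyway, and for non-negative A, "A srem 8" equals "A and 7".

    %rem = srem i32 %a, 8
    %r = shl i32 %x, %rem
    ; becomes:
    %rem.masked = and i32 %a, 7
    %r = shl i32 %x, %rem.masked
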
428
429/// Return true if we can simplify two logical (either left or right) shifts
430/// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
431static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
432 Instruction *InnerShift,
433 InstCombinerImpl &IC, Instruction *CxtI) {
434 assert(InnerShift->isLogicalShift() && "Unexpected instruction type");
435
436 // We need constant scalar or constant splat shifts.
437 const APInt *InnerShiftConst;
438 if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
439 return false;
440
441 // Two logical shifts in the same direction:
442 // shl (shl X, C1), C2 --> shl X, C1 + C2
443 // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
444 bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
445 if (IsInnerShl == IsOuterShl)
446 return true;
447
448 // Equal shift amounts in opposite directions become bitwise 'and':
449 // lshr (shl X, C), C --> and X, C'
450 // shl (lshr X, C), C --> and X, C'
451 if (*InnerShiftConst == OuterShAmt)
452 return true;
453
454 // If the 2nd shift is bigger than the 1st, we can fold:
455 // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
456 // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
457 // but it isn't profitable unless we know the and'd out bits are already zero.
458 // Also, check that the inner shift is valid (less than the type width) or
459 // we'll crash trying to produce the bit mask for the 'and'.
460 unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
461 if (InnerShiftConst->ugt(OuterShAmt) && InnerShiftConst->ult(TypeWidth)) {
462 unsigned InnerShAmt = InnerShiftConst->getZExtValue();
463 unsigned MaskShift =
464 IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
465 APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
466 if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
467 return true;
468 }
469
470 return false;
471}
472
473/// See if we can compute the specified value, but shifted logically to the left
474/// or right by some number of bits. This should return true if the expression
475/// can be computed for the same cost as the current expression tree. This is
476/// used to eliminate extraneous shifting from things like:
477/// %C = shl i128 %A, 64
478/// %D = shl i128 %B, 96
479/// %E = or i128 %C, %D
480/// %F = lshr i128 %E, 64
481// where the client will ask if E can be computed shifted right by 64 bits. If
482/// this succeeds, getShiftedValue() will be called to produce the value.
483static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
484 InstCombinerImpl &IC, Instruction *CxtI) {
485 // We can always evaluate constants shifted.
486 if (isa<Constant>(V))
487 return true;
488
489 Instruction *I = dyn_cast<Instruction>(V);
490 if (!I) return false;
491
492 // We can't mutate something that has multiple uses: doing so would
493 // require duplicating the instruction in general, which isn't profitable.
494 if (!I->hasOneUse()) return false;
495
496 switch (I->getOpcode()) {
497 default: return false;
498 case Instruction::And:
499 case Instruction::Or:
500 case Instruction::Xor:
501 // Bitwise operators can all be arbitrarily evaluated shifted.
502 return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
503 canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
504
505 case Instruction::Shl:
506 case Instruction::LShr:
507 return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
508
509 case Instruction::Select: {
510 SelectInst *SI = cast<SelectInst>(I);
511 Value *TrueVal = SI->getTrueValue();
512 Value *FalseVal = SI->getFalseValue();
513 return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
514 canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
515 }
516 case Instruction::PHI: {
517 // We can change a phi if we can change all operands. Note that we never
518 // get into trouble with cyclic PHIs here because we only consider
519 // instructions with a single use.
520 PHINode *PN = cast<PHINode>(I);
521 for (Value *IncValue : PN->incoming_values())
522 if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
523 return false;
524 return true;
525 }
526 }
527}
528
529/// Fold OuterShift (InnerShift X, C1), C2.
530/// See canEvaluateShiftedShift() for the constraints on these instructions.
531static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
532 bool IsOuterShl,
533 InstCombiner::BuilderTy &Builder) {
534 bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
535 Type *ShType = InnerShift->getType();
536 unsigned TypeWidth = ShType->getScalarSizeInBits();
537
538 // We only accept shifts-by-a-constant in canEvaluateShifted().
539 const APInt *C1;
540 match(InnerShift->getOperand(1), m_APInt(C1));
541 unsigned InnerShAmt = C1->getZExtValue();
542
543 // Change the shift amount and clear the appropriate IR flags.
544 auto NewInnerShift = [&](unsigned ShAmt) {
545 InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
546 if (IsInnerShl) {
547 InnerShift->setHasNoUnsignedWrap(false);
548 InnerShift->setHasNoSignedWrap(false);
549 } else {
550 InnerShift->setIsExact(false);
551 }
552 return InnerShift;
553 };
554
555 // Two logical shifts in the same direction:
556 // shl (shl X, C1), C2 --> shl X, C1 + C2
557 // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
558 if (IsInnerShl == IsOuterShl) {
559 // If this is an oversized composite shift, then unsigned shifts get 0.
560 if (InnerShAmt + OuterShAmt >= TypeWidth)
561 return Constant::getNullValue(ShType);
562
563 return NewInnerShift(InnerShAmt + OuterShAmt);
564 }
565
566 // Equal shift amounts in opposite directions become bitwise 'and':
567 // lshr (shl X, C), C --> and X, C'
568 // shl (lshr X, C), C --> and X, C'
569 if (InnerShAmt == OuterShAmt) {
570 APInt Mask = IsInnerShl
571 ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
572 : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
573 Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
574 ConstantInt::get(ShType, Mask));
575 if (auto *AndI = dyn_cast<Instruction>(And)) {
576 AndI->moveBefore(InnerShift);
577 AndI->takeName(InnerShift);
578 }
579 return And;
580 }
581
582 assert(InnerShAmt > OuterShAmt &&
583        "Unexpected opposite direction logical shift pair");
584
585 // In general, we would need an 'and' for this transform, but
586 // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
587 // lshr (shl X, C1), C2 --> shl X, C1 - C2
588 // shl (lshr X, C1), C2 --> lshr X, C1 - C2
589 return NewInnerShift(InnerShAmt - OuterShAmt);
590}
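
The equal-amounts-in-opposite-directions case, instantiated with an assumed C = 8 on i32 (the mask C' keeps the low 24 bits):

    %t = shl i32 %x, 8
    %r = lshr i32 %t, 8
    ; becomes:
    %r = and i32 %x, 16777215
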
591
592/// When canEvaluateShifted() returns true for an expression, this function
593/// inserts the new computation that produces the shifted value.
594static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
595 InstCombinerImpl &IC, const DataLayout &DL) {
596 // We can always evaluate constants shifted.
597 if (Constant *C = dyn_cast<Constant>(V)) {
598 if (isLeftShift)
599 return IC.Builder.CreateShl(C, NumBits);
600 else
601 return IC.Builder.CreateLShr(C, NumBits);
602 }
603
604 Instruction *I = cast<Instruction>(V);
605 IC.addToWorklist(I);
606
607 switch (I->getOpcode()) {
608 default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
609 case Instruction::And:
610 case Instruction::Or:
611 case Instruction::Xor:
612 // Bitwise operators can all be arbitrarily evaluated shifted.
613 I->setOperand(
614 0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
615 I->setOperand(
616 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
617 return I;
618
619 case Instruction::Shl:
620 case Instruction::LShr:
621 return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
622 IC.Builder);
623
624 case Instruction::Select:
625 I->setOperand(
626 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
627 I->setOperand(
628 2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
629 return I;
630 case Instruction::PHI: {
631 // We can change a phi if we can change all operands. Note that we never
632 // get into trouble with cyclic PHIs here because we only consider
633 // instructions with a single use.
634 PHINode *PN = cast<PHINode>(I);
635 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
636 PN->setIncomingValue(i, getShiftedValue(PN->getIncomingValue(i), NumBits,
637 isLeftShift, IC, DL));
638 return PN;
639 }
640 }
641}
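
Putting canEvaluateShifted() and getShiftedValue() together on a hypothetical single-use tree (names and constants invented): evaluating the 'or' pre-shifted left by 4 turns the lshr/shl pair into a mask and shifts the constant.

    %s = lshr i32 %x, 4
    %o = or i32 %s, 15
    %r = shl i32 %o, 4
    ; becomes:
    %x.masked = and i32 %x, -16
    %r = or i32 %x.masked, 240      ; 15 << 4
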
642
643// If this is a bitwise operator or add with a constant RHS, we might be able
644// to pull it through a shift.
645static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift,
646 BinaryOperator *BO) {
647 switch (BO->getOpcode()) {
648 default:
649 return false; // Do not perform transform!
650 case Instruction::Add:
651 return Shift.getOpcode() == Instruction::Shl;
652 case Instruction::Or:
653 case Instruction::And:
654 return true;
655 case Instruction::Xor:
656 // Do not change a 'not' of logical shift because that would create a normal
657 // 'xor'. The 'not' is likely better for analysis, SCEV, and codegen.
658 return !(Shift.isLogicalShift() && match(BO, m_Not(m_Value())));
659 }
660}
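
The Add case is limited to shl because left-shift distributes over addition modulo 2^N, while right shifts do not; a hypothetical instance:

    %a = add i32 %x, 5
    %r = shl i32 %a, 2
    ; becomes:
    %s = shl i32 %x, 2
    %r = add i32 %s, 20             ; 5 << 2
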
661
662Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *Op1,
663 BinaryOperator &I) {
664 bool isLeftShift = I.getOpcode() == Instruction::Shl;
665
666 const APInt *Op1C;
667 if (!match(Op1, m_APInt(Op1C)))
668 return nullptr;
669
670 // See if we can propagate this shift into the input; this covers the trivial
671 // case of lshr(shl(x,c1),c2) as well as other more complex cases.
672 if (I.getOpcode() != Instruction::AShr &&
673 canEvaluateShifted(Op0, Op1C->getZExtValue(), isLeftShift, *this, &I)) {
674 LLVM_DEBUG(
675 dbgs() << "ICE: GetShiftedValue propagating shift through expression"
676 " to eliminate shift:\n IN: "
677 << *Op0 << "\n SH: " << I << "\n");
678
679 return replaceInstUsesWith(
680 I, getShiftedValue(Op0, Op1C->getZExtValue(), isLeftShift, *this, DL));
681 }
682
683 // See if we can simplify any instructions used by the instruction whose sole
684 // purpose is to compute bits we don't care about.
685 Type *Ty = I.getType();
686 unsigned TypeBits = Ty->getScalarSizeInBits();
687 assert(!Op1C->uge(TypeBits) &&
688        "Shift over the type width should have been removed already");
689
690 if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I))
691 return FoldedShift;
692
693 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
694 if (auto *TI = dyn_cast<TruncInst>(Op0)) {
695 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
696 // place. Don't try to do this transformation in this case. Also, we
697 // require that the input operand is a shift-by-constant so that we have
698 // confidence that the shifts will get folded together. We could do this
699 // xform in more cases, but it is unlikely to be profitable.
700 const APInt *TrShiftAmt;
701 if (I.isLogicalShift() &&
702 match(TI->getOperand(0), m_Shift(m_Value(), m_APInt(TrShiftAmt)))) {
703 auto *TrOp = cast<Instruction>(TI->getOperand(0));
704 Type *SrcTy = TrOp->getType();
705
706 // Okay, we'll do this xform. Make the shift of shift.
707 Constant *ShAmt = ConstantExpr::getZExt(Op1, SrcTy);
708 // (shift2 (shift1 & 0x00FF), c2)
709 Value *NSh = Builder.CreateBinOp(I.getOpcode(), TrOp, ShAmt, I.getName());
710
711 // For logical shifts, the truncation has the effect of making the high
712 // part of the register be zeros. Emulate this by inserting an AND to
713 // clear the top bits as needed. This 'and' will usually be zapped by
714 // other xforms later if dead.
715 unsigned SrcSize = SrcTy->getScalarSizeInBits();
716 Constant *MaskV =
717 ConstantInt::get(SrcTy, APInt::getLowBitsSet(SrcSize, TypeBits));
718
719 // The mask we constructed says what the trunc would do if occurring
720 // between the shifts. We want to know the effect *after* the second
721 // shift. We know that it is a logical shift by a constant, so adjust the
722 // mask as appropriate.
723 MaskV = ConstantExpr::get(I.getOpcode(), MaskV, ShAmt);
724 // shift1 & 0x00FF
725 Value *And = Builder.CreateAnd(NSh, MaskV, TI->getName());
726 // Return the value truncated to the interesting size.
727 return new TruncInst(And, Ty);
728 }
729 }
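
A sketch of this xform with assumed types and constants (i64 shifted right by 8, truncated to i32, then shifted right by 4); the adjusted mask emulates the truncation that used to sit between the shifts:

    %s1 = lshr i64 %x, 8
    %t = trunc i64 %s1 to i32
    %r = lshr i32 %t, 4
    ; becomes:
    %nsh = lshr i64 %s1, 4
    %and = and i64 %nsh, 268435455   ; (2^32 - 1) u>> 4
    %r = trunc i64 %and to i32
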
730
731 if (Op0->hasOneUse()) {
732 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
733 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
734 Value *V1;
735 const APInt *CC;
736 switch (Op0BO->getOpcode()) {
737 default: break;
738 case Instruction::Add:
739 case Instruction::And:
740 case Instruction::Or:
741 case Instruction::Xor: {
742 // These operators commute.
743 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
744 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
745 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
746 m_Specific(Op1)))) {
747 Value *YS = // (Y << C)
748 Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
749 // (X + (Y << C))
750 Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), YS, V1,
751 Op0BO->getOperand(1)->getName());
752 unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
753 APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
754 Constant *Mask = ConstantInt::get(Ty, Bits);
755 return BinaryOperator::CreateAnd(X, Mask);
756 }
757
758 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
759 Value *Op0BOOp1 = Op0BO->getOperand(1);
760 if (isLeftShift && Op0BOOp1->hasOneUse() &&
761 match(Op0BOOp1, m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
762 m_APInt(CC)))) {
763 Value *YS = // (Y << C)
764 Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
765 // X & (CC << C)
766 Value *XM = Builder.CreateAnd(
767 V1, ConstantExpr::getShl(ConstantInt::get(Ty, *CC), Op1),
768 V1->getName() + ".mask");
769 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
770 }
771 LLVM_FALLTHROUGH;
772 }
773
774 case Instruction::Sub: {
775 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
776 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
777 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
778 m_Specific(Op1)))) {
779 Value *YS = // (Y << C)
780 Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
781 // (X + (Y << C))
782 Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), V1, YS,
783 Op0BO->getOperand(0)->getName());
784 unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
785 APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
786 Constant *Mask = ConstantInt::get(Ty, Bits);
787 return BinaryOperator::CreateAnd(X, Mask);
788 }
789
790 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
791 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
792 match(Op0BO->getOperand(0),
793 m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
794 m_APInt(CC)))) {
795 Value *YS = // (Y << C)
796 Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
797 // X & (CC << C)
798 Value *XM = Builder.CreateAnd(
799 V1, ConstantExpr::getShl(ConstantInt::get(Ty, *CC), Op1),
800 V1->getName() + ".mask");
801 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
802 }
803
804 break;
805 }
806 }
807
808 // If the operand is a bitwise operator with a constant RHS, and the
809 // shift is the only use, we can pull it out of the shift.
810 const APInt *Op0C;
811 if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
812 if (canShiftBinOpWithConstantRHS(I, Op0BO)) {
813 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
814 cast<Constant>(Op0BO->getOperand(1)), Op1);
815
816 Value *NewShift =
817 Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
818 NewShift->takeName(Op0BO);
819
820 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
821 NewRHS);
822 }
823 }
824
825 // If the operand is a subtract with a constant LHS, and the shift
826 // is the only use, we can pull it out of the shift.
827 // This folds (shl (sub C1, X), C2) -> (sub (C1 << C2), (shl X, C2))
828 if (isLeftShift && Op0BO->getOpcode() == Instruction::Sub &&
829 match(Op0BO->getOperand(0), m_APInt(Op0C))) {
830 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
831 cast<Constant>(Op0BO->getOperand(0)), Op1);
832
833 Value *NewShift = Builder.CreateShl(Op0BO->getOperand(1), Op1);
834 NewShift->takeName(Op0BO);
835
836 return BinaryOperator::CreateSub(NewRHS, NewShift);
837 }
838 }
839
840 // If we have a select that conditionally executes some binary operator,
841 // see if we can pull the select and operator through the shift.
842 //
843 // For example, turning:
844 // shl (select C, (add X, C1), X), C2
845 // Into:
846 // Y = shl X, C2
847 // select C, (add Y, C1 << C2), Y
848 Value *Cond;
849 BinaryOperator *TBO;
850 Value *FalseVal;
851 if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
852 m_Value(FalseVal)))) {
853 const APInt *C;
854 if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
855 match(TBO->getOperand(1), m_APInt(C)) &&
856 canShiftBinOpWithConstantRHS(I, TBO)) {
857 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
858 cast<Constant>(TBO->getOperand(1)), Op1);
859
860 Value *NewShift =
861 Builder.CreateBinOp(I.getOpcode(), FalseVal, Op1);
862 Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift,
863 NewRHS);
864 return SelectInst::Create(Cond, NewOp, NewShift);
865 }
866 }
867
868 BinaryOperator *FBO;
869 Value *TrueVal;
870 if (match(Op0, m_Select(m_Value(Cond), m_Value(TrueVal),
871 m_OneUse(m_BinOp(FBO))))) {
872 const APInt *C;
873 if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
874 match(FBO->getOperand(1), m_APInt(C)) &&
875 canShiftBinOpWithConstantRHS(I, FBO)) {
876 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
877 cast<Constant>(FBO->getOperand(1)), Op1);
878
879 Value *NewShift =
880 Builder.CreateBinOp(I.getOpcode(), TrueVal, Op1);
881 Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift,
882 NewRHS);
883 return SelectInst::Create(Cond, NewShift, NewOp);
884 }
885 }
886 }
887
888 return nullptr;
889}
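
The subtract-with-constant-LHS fold near the end of the one-use block, with invented constants:

    %d = sub i32 100, %x
    %r = shl i32 %d, 3
    ; becomes: (sub (C1 << C2), (shl X, C2))
    %s = shl i32 %x, 3
    %r = sub i32 800, %s
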
890
891Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
892 const SimplifyQuery Q = SQ.getWithInstruction(&I);
893
894 if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
1: Assuming 'V' is null
2: Taking false branch
895 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(), Q))
896 return replaceInstUsesWith(I, V);
897
898 if (Instruction *X = foldVectorBinop(I))
3: Assuming 'X' is null
4: Taking false branch
899 return X;
900
901 if (Instruction *V = commonShiftTransforms(I))
5: Assuming 'V' is null
6: Taking false branch
902 return V;
903
904 if (Instruction *V = dropRedundantMaskingOfLeftShiftInput(&I, Q, Builder))
7: Calling 'dropRedundantMaskingOfLeftShiftInput'
905 return V;
906
907 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
908 Type *Ty = I.getType();
909 unsigned BitWidth = Ty->getScalarSizeInBits();
910
911 const APInt *ShAmtAPInt;
912 if (match(Op1, m_APInt(ShAmtAPInt))) {
913 unsigned ShAmt = ShAmtAPInt->getZExtValue();
914
915 // shl (zext X), ShAmt --> zext (shl X, ShAmt)
916 // This is only valid if X would have zeros shifted out.
917 Value *X;
918 if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
919 unsigned SrcWidth = X->getType()->getScalarSizeInBits();
920 if (ShAmt < SrcWidth &&
921 MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I))
922 return new ZExtInst(Builder.CreateShl(X, ShAmt), Ty);
923 }
924
925 // (X >> C) << C --> X & (-1 << C)
926 if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1)))) {
927 APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
928 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
929 }
930
931 const APInt *ShOp1;
932 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(ShOp1)))) &&
933 ShOp1->ult(BitWidth)) {
934 unsigned ShrAmt = ShOp1->getZExtValue();
935 if (ShrAmt < ShAmt) {
936 // If C1 < C2: (X >>?,exact C1) << C2 --> X << (C2 - C1)
937 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShrAmt);
938 auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
939 NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
940 NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
941 return NewShl;
942 }
943 if (ShrAmt > ShAmt) {
944 // If C1 > C2: (X >>?exact C1) << C2 --> X >>?exact (C1 - C2)
945 Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmt);
946 auto *NewShr = BinaryOperator::Create(
947 cast<BinaryOperator>(Op0)->getOpcode(), X, ShiftDiff);
948 NewShr->setIsExact(true);
949 return NewShr;
950 }
951 }
952
953 if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_APInt(ShOp1)))) &&
954 ShOp1->ult(BitWidth)) {
955 unsigned ShrAmt = ShOp1->getZExtValue();
956 if (ShrAmt < ShAmt) {
957 // If C1 < C2: (X >>? C1) << C2 --> X << (C2 - C1) & (-1 << C2)
958 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShrAmt);
959 auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
960 NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
961 NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
962 Builder.Insert(NewShl);
963 APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
964 return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
965 }
966 if (ShrAmt > ShAmt) {
967 // If C1 > C2: (X >>? C1) << C2 --> X >>? (C1 - C2) & (-1 << C2)
968 Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmt);
969 auto *OldShr = cast<BinaryOperator>(Op0);
970 auto *NewShr =
971 BinaryOperator::Create(OldShr->getOpcode(), X, ShiftDiff);
972 NewShr->setIsExact(OldShr->isExact());
973 Builder.Insert(NewShr);
974 APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
975 return BinaryOperator::CreateAnd(NewShr, ConstantInt::get(Ty, Mask));
976 }
977 }
978
979 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1))) && ShOp1->ult(BitWidth)) {
980 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
981 // Oversized shifts are simplified to zero in InstSimplify.
982 if (AmtSum < BitWidth)
983 // (X << C1) << C2 --> X << (C1 + C2)
984 return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
985 }
986
987 // If the shifted-out value is known-zero, then this is a NUW shift.
988 if (!I.hasNoUnsignedWrap() &&
989 MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmt), 0, &I)) {
990 I.setHasNoUnsignedWrap();
991 return &I;
992 }
993
994 // If the shifted-out value is all signbits, then this is a NSW shift.
995 if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmt) {
996 I.setHasNoSignedWrap();
997 return &I;
998 }
999 }
1000
1001 // Transform (x >> y) << y to x & (-1 << y)
1002 // Valid for any type of right-shift.
1003 Value *X;
1004 if (match(Op0, m_OneUse(m_Shr(m_Value(X), m_Specific(Op1))))) {
1005 Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
1006 Value *Mask = Builder.CreateShl(AllOnes, Op1);
1007 return BinaryOperator::CreateAnd(Mask, X);
1008 }
1009
1010 Constant *C1;
1011 if (match(Op1, m_Constant(C1))) {
1012 Constant *C2;
1013 Value *X;
1014 // (C2 << X) << C1 --> (C2 << C1) << X
1015 if (match(Op0, m_OneUse(m_Shl(m_Constant(C2), m_Value(X)))))
1016 return BinaryOperator::CreateShl(ConstantExpr::getShl(C2, C1), X);
1017
1018 // (X * C2) << C1 --> X * (C2 << C1)
1019 if (match(Op0, m_Mul(m_Value(X), m_Constant(C2))))
1020 return BinaryOperator::CreateMul(X, ConstantExpr::getShl(C2, C1));
1021
1022 // shl (zext i1 X), C1 --> select (X, 1 << C1, 0)
1023 if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1024 auto *NewC = ConstantExpr::getShl(ConstantInt::get(Ty, 1), C1);
1025 return SelectInst::Create(X, NewC, ConstantInt::getNullValue(Ty));
1026 }
1027 }
1028
1029 // (1 << (C - x)) -> ((1 << C) >> x) if C is bitwidth - 1
1030 if (match(Op0, m_One()) &&
1031 match(Op1, m_Sub(m_SpecificInt(BitWidth - 1), m_Value(X))))
1032 return BinaryOperator::CreateLShr(
1033 ConstantInt::get(Ty, APInt::getSignMask(BitWidth)), X);
1034
1035 return nullptr;
1036}
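
One of the constant-shift folds above, (X * C2) << C1 --> X * (C2 << C1), instantiated with assumed constants:

    %m = mul i32 %x, 3
    %r = shl i32 %m, 4
    ; becomes:
    %r = mul i32 %x, 48             ; 3 << 4
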
1037
1038Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
1039 if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
1040 SQ.getWithInstruction(&I)))
1041 return replaceInstUsesWith(I, V);
1042
1043 if (Instruction *X = foldVectorBinop(I))
1044 return X;
1045
1046 if (Instruction *R = commonShiftTransforms(I))
1047 return R;
1048
1049 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1050 Type *Ty = I.getType();
1051 const APInt *ShAmtAPInt;
1052 if (match(Op1, m_APInt(ShAmtAPInt))) {
1053 unsigned ShAmt = ShAmtAPInt->getZExtValue();
1054 unsigned BitWidth = Ty->getScalarSizeInBits();
1055 auto *II = dyn_cast<IntrinsicInst>(Op0);
1056 if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt &&
1057 (II->getIntrinsicID() == Intrinsic::ctlz ||
1058 II->getIntrinsicID() == Intrinsic::cttz ||
1059 II->getIntrinsicID() == Intrinsic::ctpop)) {
1060 // ctlz.i32(x)>>5 --> zext(x == 0)
1061 // cttz.i32(x)>>5 --> zext(x == 0)
1062 // ctpop.i32(x)>>5 --> zext(x == -1)
1063 bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
1064 Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
1065 Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
1066 return new ZExtInst(Cmp, Ty);
1067 }
1068
1069 Value *X;
1070 const APInt *ShOp1;
1071 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1))) && ShOp1->ult(BitWidth)) {
1072 if (ShOp1->ult(ShAmt)) {
1073 unsigned ShlAmt = ShOp1->getZExtValue();
1074 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
1075 if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
1076 // (X <<nuw C1) >>u C2 --> X >>u (C2 - C1)
1077 auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
1078 NewLShr->setIsExact(I.isExact());
1079 return NewLShr;
1080 }
1081 // (X << C1) >>u C2 --> (X >>u (C2 - C1)) & (-1 >> C2)
1082 Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
1083 APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
1084 return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
1085 }
1086 if (ShOp1->ugt(ShAmt)) {
1087 unsigned ShlAmt = ShOp1->getZExtValue();
1088 Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
1089 if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
1090 // (X <<nuw C1) >>u C2 --> X <<nuw (C1 - C2)
1091 auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
1092 NewShl->setHasNoUnsignedWrap(true);
1093 return NewShl;
1094 }
1095 // (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
1096 Value *NewShl = Builder.CreateShl(X, ShiftDiff);
1097 APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
1098 return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
1099 }
1100 assert(*ShOp1 == ShAmt);
1101 // (X << C) >>u C --> X & (-1 >>u C)
1102 APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
1103 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
1104 }
1105
1106 if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
1107 (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
1108 assert(ShAmt < X->getType()->getScalarSizeInBits() &&
1109        "Big shift not simplified to zero?");
1110 // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
1111 Value *NewLShr = Builder.CreateLShr(X, ShAmt);
1112 return new ZExtInst(NewLShr, Ty);
1113 }
1114
1115 if (match(Op0, m_SExt(m_Value(X))) &&
1116 (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
1117 // Are we moving the sign bit to the low bit and widening with high zeros?
1118 unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
1119 if (ShAmt == BitWidth - 1) {
1120 // lshr (sext i1 X to iN), N-1 --> zext X to iN
1121 if (SrcTyBitWidth == 1)
1122 return new ZExtInst(X, Ty);
1123
1124 // lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
1125 if (Op0->hasOneUse()) {
1126 Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
1127 return new ZExtInst(NewLShr, Ty);
1128 }
1129 }
1130
1131 // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
1132 if (ShAmt == BitWidth - SrcTyBitWidth && Op0->hasOneUse()) {
1133 // The new shift amount can't be more than the narrow source type.
1134 unsigned NewShAmt = std::min(ShAmt, SrcTyBitWidth - 1);
1135 Value *AShr = Builder.CreateAShr(X, NewShAmt);
1136 return new ZExtInst(AShr, Ty);
1137 }
1138 }
1139
1140 // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
1141 Value *Y;
1142 if (ShAmt == BitWidth - 1 &&
1143 match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
1144 return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
1145
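The sign-bit fold just above has a direct scalar analogue (hand-written sketch; assumes 32-bit int, with nsw corresponding to the subtraction not overflowing):

    // lshr (sub nsw X, Y), 31 == zext(X <s Y), modeled on plain int:
    constexpr bool SignBitIsSLT(int X, int Y) {
      return (static_cast<unsigned>(X - Y) >> 31) == static_cast<unsigned>(X < Y);
    }
    static_assert(SignBitIsSLT(-7, 5) && SignBitIsSLT(5, -7), "");
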
1146 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShOp1)))) {
1147 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
1148 // Oversized shifts are simplified to zero in InstSimplify.
1149 if (AmtSum < BitWidth)
1150 // (X >>u C1) >>u C2 --> X >>u (C1 + C2)
1151 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
1152 }
1153
1154 // Look for a "splat" mul pattern - it replicates bits across each half of
1155 // a value, so a right shift is just a mask of the low bits:
1156 // lshr i32 (mul nuw X, Pow2+1), 16 --> and X, Pow2-1
1157 // TODO: Generalize to allow more than just half-width shifts?
1158 const APInt *MulC;
1159 if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC))) &&
1160 ShAmt * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
1161 MulC->logBase2() == ShAmt)
1162 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
1163
1164 // If the shifted-out value is known-zero, then this is an exact shift.
1165 if (!I.isExact() &&
1166 MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
1167 I.setIsExact();
1168 return &I;
1169 }
1170 }
1171
1172 // Transform (x << y) >> y to x & (-1 >> y)
1173 Value *X;
1174 if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
1175 Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
1176 Value *Mask = Builder.CreateLShr(AllOnes, Op1);
1177 return BinaryOperator::CreateAnd(Mask, X);
1178 }
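
This variable-amount identity holds unconditionally for logical shifts, since shl discards the high Y bits and lshr refills them with zeros; a spot check (hand-written, assumes 32-bit unsigned):

    static_assert(((0xDEADBEEFu << 8) >> 8) ==
                  (0xDEADBEEFu & (0xFFFFFFFFu >> 8)), "");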
1179
1180 return nullptr;
1181}
1182
1183Instruction *
1184InstCombinerImpl::foldVariableSignZeroExtensionOfVariableHighBitExtract(
1185 BinaryOperator &OldAShr) {
1186  assert(OldAShr.getOpcode() == Instruction::AShr &&
1187         "Must be called with arithmetic right-shift instruction only.");
1188
1189 // Check that constant C is a splat of the element-wise bitwidth of V.
1190 auto BitWidthSplat = [](Constant *C, Value *V) {
1191 return match(
1192 C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1193 APInt(C->getType()->getScalarSizeInBits(),
1194 V->getType()->getScalarSizeInBits())));
1195 };
1196
1197 // It should look like variable-length sign-extension on the outside:
1198 // (Val << (bitwidth(Val)-Nbits)) a>> (bitwidth(Val)-Nbits)
1199 Value *NBits;
1200 Instruction *MaybeTrunc;
1201 Constant *C1, *C2;
1202 if (!match(&OldAShr,
1203 m_AShr(m_Shl(m_Instruction(MaybeTrunc),
1204 m_ZExtOrSelf(m_Sub(m_Constant(C1),
1205 m_ZExtOrSelf(m_Value(NBits))))),
1206 m_ZExtOrSelf(m_Sub(m_Constant(C2),
1207 m_ZExtOrSelf(m_Deferred(NBits)))))) ||
1208 !BitWidthSplat(C1, &OldAShr) || !BitWidthSplat(C2, &OldAShr))
1209 return nullptr;
1210
1211  // There may or may not be a truncation after the outer two shifts.
1212 Instruction *HighBitExtract;
1213 match(MaybeTrunc, m_TruncOrSelf(m_Instruction(HighBitExtract)));
1214 bool HadTrunc = MaybeTrunc != HighBitExtract;
1215
1216 // And finally, the innermost part of the pattern must be a right-shift.
1217 Value *X, *NumLowBitsToSkip;
1218 if (!match(HighBitExtract, m_Shr(m_Value(X), m_Value(NumLowBitsToSkip))))
1219 return nullptr;
1220
1221  // Said right-shift must extract high NBits bits - C0 must be its bitwidth.
1222 Constant *C0;
1223 if (!match(NumLowBitsToSkip,
1224 m_ZExtOrSelf(
1225 m_Sub(m_Constant(C0), m_ZExtOrSelf(m_Specific(NBits))))) ||
1226 !BitWidthSplat(C0, HighBitExtract))
1227 return nullptr;
1228
1229  // Since NBits is identical for all shifts, if the outermost and
1230  // innermost shifts are identical, then the outermost shifts are redundant.
1231  // If we had a truncation, do keep it though.
1232 if (HighBitExtract->getOpcode() == OldAShr.getOpcode())
1233 return replaceInstUsesWith(OldAShr, MaybeTrunc);
1234
1235 // Else, if there was a truncation, then we need to ensure that one
1236 // instruction will go away.
1237 if (HadTrunc && !match(&OldAShr, m_c_BinOp(m_OneUse(m_Value()), m_Value())))
1238 return nullptr;
1239
1240 // Finally, bypass two innermost shifts, and perform the outermost shift on
1241 // the operands of the innermost shift.
1242 Instruction *NewAShr =
1243 BinaryOperator::Create(OldAShr.getOpcode(), X, NumLowBitsToSkip);
1244 NewAShr->copyIRFlags(HighBitExtract); // We can preserve 'exact'-ness.
1245 if (!HadTrunc)
1246 return NewAShr;
1247
1248 Builder.Insert(NewAShr);
1249 return TruncInst::CreateTruncOrBitCast(NewAShr, OldAShr.getType());
1250}
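
The shape this routine matches and produces can be sketched in pseudo-IR (a hand-written illustration, not taken from the report; W is the bitwidth of %x and %nbits the variable extension width):

    // %skip = sub i32 W, %nbits        ; W == bitwidth(%x)
    // %hi   = lshr i32 %x, %skip       ; extract the high %nbits bits
    // %shl  = shl  i32 %hi, %skip      ; move them back to the top
    // %old  = ashr i32 %shl, %skip     ; variable "sign extension"
    //   -->
    // %new  = ashr i32 %x, %skip       ; one shift on the original operands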
1251
1252Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
1253 if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
1254 SQ.getWithInstruction(&I)))
1255 return replaceInstUsesWith(I, V);
1256
1257 if (Instruction *X = foldVectorBinop(I))
1258 return X;
1259
1260 if (Instruction *R = commonShiftTransforms(I))
1261 return R;
1262
1263 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1264 Type *Ty = I.getType();
1265 unsigned BitWidth = Ty->getScalarSizeInBits();
1266 const APInt *ShAmtAPInt;
1267 if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
1268 unsigned ShAmt = ShAmtAPInt->getZExtValue();
1269
1270 // If the shift amount equals the difference in width of the destination
1271 // and source scalar types:
1272 // ashr (shl (zext X), C), C --> sext X
1273 Value *X;
1274 if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
1275 ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
1276 return new SExtInst(X, Ty);
1277
1278 // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
1279 // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
1280 const APInt *ShOp1;
1281 if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
1282 ShOp1->ult(BitWidth)) {
1283 unsigned ShlAmt = ShOp1->getZExtValue();
1284 if (ShlAmt < ShAmt) {
1285 // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
1286 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
1287 auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
1288 NewAShr->setIsExact(I.isExact());
1289 return NewAShr;
1290 }
1291 if (ShlAmt > ShAmt) {
1292 // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
1293 Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
1294 auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
1295 NewShl->setHasNoSignedWrap(true);
1296 return NewShl;
1297 }
1298 }
1299
1300 if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
1301 ShOp1->ult(BitWidth)) {
1302 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
1303 // Oversized arithmetic shifts replicate the sign bit.
1304 AmtSum = std::min(AmtSum, BitWidth - 1);
1305 // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
1306 return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
1307 }
1308
1309 if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
1310 (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
1311 // ashr (sext X), C --> sext (ashr X, C')
1312 Type *SrcTy = X->getType();
1313 ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
1314 Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
1315 return new SExtInst(NewSh, Ty);
1316 }
1317
1318 // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
1319 Value *Y;
1320 if (ShAmt == BitWidth - 1 &&
1321 match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
1322 return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
1323
1324 // If the shifted-out value is known-zero, then this is an exact shift.
1325 if (!I.isExact() &&
1326 MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
1327 I.setIsExact();
1328 return &I;
1329 }
1330 }
1331
1332 if (Instruction *R = foldVariableSignZeroExtensionOfVariableHighBitExtract(I))
1333 return R;
1334
1335 // See if we can turn a signed shr into an unsigned shr.
1336 if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I))
1337 return BinaryOperator::CreateLShr(Op0, Op1);
1338
1339 // ashr (xor %x, -1), %y --> xor (ashr %x, %y), -1
1340 Value *X;
1341 if (match(Op0, m_OneUse(m_Not(m_Value(X))))) {
1342 // Note that we must drop 'exact'-ness of the shift!
1343    // Note that we can't keep undefs in the -1 vector constant!
1344 auto *NewAShr = Builder.CreateAShr(X, Op1, Op0->getName() + ".not");
1345 return BinaryOperator::CreateNot(NewAShr);
1346 }
1347
1348 return nullptr;
1349}
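
The final not-propagation fold relies on ashr commuting with bitwise complement; a plain-C++ spot check (hand-written; assumes signed >> performs an arithmetic right shift, as it does on this report's x86-64 target):

    static_assert((~(-5) >> 1) == ~((-5) >> 1), "");
    static_assert((~40 >> 3) == ~(40 >> 3), "");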

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a simple and efficient mechanism for performing general
10// tree-based pattern matches on the LLVM IR. The power of these routines is
11// that it allows you to write concise patterns that are expressive and easy to
12// understand. The other major advantage of this is that it allows you to
13// trivially capture/bind elements in the pattern to variables. For example,
14// you can do something like this:
15//
16// Value *Exp = ...
17// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
18// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
19// m_And(m_Value(Y), m_ConstantInt(C2))))) {
20// ... Pattern is matched and variables are bound ...
21// }
22//
23// This is primarily useful to things like the instruction combiner, but can
24// also be useful for static analysis tools or code generators.
25//
26//===----------------------------------------------------------------------===//
27
28#ifndef LLVM_IR_PATTERNMATCH_H
29#define LLVM_IR_PATTERNMATCH_H
30
31#include "llvm/ADT/APFloat.h"
32#include "llvm/ADT/APInt.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include <cstdint>
45
46namespace llvm {
47namespace PatternMatch {
48
49template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
50 return const_cast<Pattern &>(P).match(V);
Step 31: Calling 'BinaryOp_match::match'
Step 36: Returning from 'BinaryOp_match::match'
Step 37: Returning the value 1, which participates in a condition later
51}
52
53template <typename Pattern> bool match(ArrayRef<int> Mask, const Pattern &P) {
54 return const_cast<Pattern &>(P).match(Mask);
55}
56
57template <typename SubPattern_t> struct OneUse_match {
58 SubPattern_t SubPattern;
59
60 OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
61
62 template <typename OpTy> bool match(OpTy *V) {
63 return V->hasOneUse() && SubPattern.match(V);
64 }
65};
66
67template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
68 return SubPattern;
69}
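
A minimal usage sketch (hand-written; assumes a Value *V from surrounding IR, and mirrors the guard used by the not-fold in visitAShr above):

    #include "llvm/IR/PatternMatch.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;

    static bool isSingleUseNot(Value *V) {
      Value *X;
      // Matches 'xor X, -1' only when V itself has exactly one use.
      return match(V, m_OneUse(m_Not(m_Value(X))));
    }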
70
71template <typename Class> struct class_match {
72 template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
73};
74
75/// Match an arbitrary value and ignore it.
76inline class_match<Value> m_Value() { return class_match<Value>(); }
77
78/// Match an arbitrary unary operation and ignore it.
79inline class_match<UnaryOperator> m_UnOp() {
80 return class_match<UnaryOperator>();
81}
82
83/// Match an arbitrary binary operation and ignore it.
84inline class_match<BinaryOperator> m_BinOp() {
85 return class_match<BinaryOperator>();
86}
87
88/// Matches any compare instruction and ignore it.
89inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
90
91/// Match an arbitrary undef constant.
92inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
93
94/// Match an arbitrary poison constant.
95inline class_match<PoisonValue> m_Poison() { return class_match<PoisonValue>(); }
96
97/// Match an arbitrary Constant and ignore it.
98inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
99
100/// Match an arbitrary ConstantInt and ignore it.
101inline class_match<ConstantInt> m_ConstantInt() {
102 return class_match<ConstantInt>();
103}
104
105/// Match an arbitrary ConstantFP and ignore it.
106inline class_match<ConstantFP> m_ConstantFP() {
107 return class_match<ConstantFP>();
108}
109
110/// Match an arbitrary ConstantExpr and ignore it.
111inline class_match<ConstantExpr> m_ConstantExpr() {
112 return class_match<ConstantExpr>();
113}
114
115/// Match an arbitrary basic block value and ignore it.
116inline class_match<BasicBlock> m_BasicBlock() {
117 return class_match<BasicBlock>();
118}
119
120/// Inverting matcher
121template <typename Ty> struct match_unless {
122 Ty M;
123
124 match_unless(const Ty &Matcher) : M(Matcher) {}
125
126 template <typename ITy> bool match(ITy *V) { return !M.match(V); }
127};
128
129/// Match if the inner matcher does *NOT* match.
130template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) {
131 return match_unless<Ty>(M);
132}
133
134/// Matching combinators
135template <typename LTy, typename RTy> struct match_combine_or {
136 LTy L;
137 RTy R;
138
139 match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
140
141 template <typename ITy> bool match(ITy *V) {
142 if (L.match(V))
143 return true;
144 if (R.match(V))
145 return true;
146 return false;
147 }
148};
149
150template <typename LTy, typename RTy> struct match_combine_and {
151 LTy L;
152 RTy R;
153
154 match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
155
156 template <typename ITy> bool match(ITy *V) {
157 if (L.match(V))
158 if (R.match(V))
159 return true;
160 return false;
161 }
162};
163
164/// Combine two pattern matchers matching L || R
165template <typename LTy, typename RTy>
166inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
167 return match_combine_or<LTy, RTy>(L, R);
Step 21: Returning without writing to 'L.Op.VR'
168}
169
170/// Combine two pattern matchers matching L && R
171template <typename LTy, typename RTy>
172inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
173 return match_combine_and<LTy, RTy>(L, R);
174}
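
A hedged sketch of the combinators, including the capture pitfall this report's 'Returning without writing' steps point at: when one arm of m_CombineOr matches, variables bound only in the other arm are left unwritten and must not be read afterwards.

    Value *X;
    const APInt *C = nullptr; // deliberate init: only the first arm writes C
    bool Matched = match(V, m_CombineOr(m_Shl(m_Value(X), m_APInt(C)),
                                        m_Value(X)));
    // If the bare m_Value arm matched, C is still nullptr here.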
175
176struct apint_match {
177 const APInt *&Res;
178 bool AllowUndef;
179
180 apint_match(const APInt *&Res, bool AllowUndef)
181 : Res(Res), AllowUndef(AllowUndef) {}
182
183 template <typename ITy> bool match(ITy *V) {
184 if (auto *CI = dyn_cast<ConstantInt>(V)) {
185 Res = &CI->getValue();
186 return true;
187 }
188 if (V->getType()->isVectorTy())
189 if (const auto *C = dyn_cast<Constant>(V))
190 if (auto *CI = dyn_cast_or_null<ConstantInt>(
191 C->getSplatValue(AllowUndef))) {
192 Res = &CI->getValue();
193 return true;
194 }
195 return false;
196 }
197};
198// Either constexpr if or renaming ConstantFP::getValueAPF to
199// ConstantFP::getValue is needed to do it via a single template
200// function for both apint/apfloat.
201struct apfloat_match {
202 const APFloat *&Res;
203 bool AllowUndef;
204
205 apfloat_match(const APFloat *&Res, bool AllowUndef)
206 : Res(Res), AllowUndef(AllowUndef) {}
207
208 template <typename ITy> bool match(ITy *V) {
209 if (auto *CI = dyn_cast<ConstantFP>(V)) {
210 Res = &CI->getValueAPF();
211 return true;
212 }
213 if (V->getType()->isVectorTy())
214 if (const auto *C = dyn_cast<Constant>(V))
215 if (auto *CI = dyn_cast_or_null<ConstantFP>(
216 C->getSplatValue(AllowUndef))) {
217 Res = &CI->getValueAPF();
218 return true;
219 }
220 return false;
221 }
222};
223
224/// Match a ConstantInt or splatted ConstantVector, binding the
225/// specified pointer to the contained APInt.
226inline apint_match m_APInt(const APInt *&Res) {
227 // Forbid undefs by default to maintain previous behavior.
228 return apint_match(Res, /* AllowUndef */ false);
229}
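
Typical use (hand-written sketch, assuming a Value *V in scope); this is how the shift visitors earlier in this report pull out a constant shift amount that may be a scalar or a vector splat:

    const APInt *ShAmtC;
    if (match(V, m_LShr(m_Value(), m_APInt(ShAmtC))) &&
        ShAmtC->ult(V->getType()->getScalarSizeInBits())) {
      unsigned ShAmt = ShAmtC->getZExtValue();
      (void)ShAmt; // ... fold using the constant amount ...
    }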
230
231/// Match APInt while allowing undefs in splat vector constants.
232inline apint_match m_APIntAllowUndef(const APInt *&Res) {
233 return apint_match(Res, /* AllowUndef */ true);
234}
235
236/// Match APInt while forbidding undefs in splat vector constants.
237inline apint_match m_APIntForbidUndef(const APInt *&Res) {
238 return apint_match(Res, /* AllowUndef */ false);
239}
240
241/// Match a ConstantFP or splatted ConstantVector, binding the
242/// specified pointer to the contained APFloat.
243inline apfloat_match m_APFloat(const APFloat *&Res) {
244 // Forbid undefs by default to maintain previous behavior.
245 return apfloat_match(Res, /* AllowUndef */ false);
246}
247
248/// Match APFloat while allowing undefs in splat vector constants.
249inline apfloat_match m_APFloatAllowUndef(const APFloat *&Res) {
250 return apfloat_match(Res, /* AllowUndef */ true);
251}
252
253/// Match APFloat while forbidding undefs in splat vector constants.
254inline apfloat_match m_APFloatForbidUndef(const APFloat *&Res) {
255 return apfloat_match(Res, /* AllowUndef */ false);
256}
257
258template <int64_t Val> struct constantint_match {
259 template <typename ITy> bool match(ITy *V) {
260 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
261 const APInt &CIV = CI->getValue();
262 if (Val >= 0)
263 return CIV == static_cast<uint64_t>(Val);
264 // If Val is negative, and CI is shorter than it, truncate to the right
265 // number of bits. If it is larger, then we have to sign extend. Just
266 // compare their negated values.
267 return -CIV == -Val;
268 }
269 return false;
270 }
271};
272
273/// Match a ConstantInt with a specific value.
274template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
275 return constantint_match<Val>();
276}
277
278/// This helper class is used to match constant scalars, vector splats,
279/// and fixed width vectors that satisfy a specified predicate.
280/// For fixed width vector constants, undefined elements are ignored.
281template <typename Predicate, typename ConstantVal>
282struct cstval_pred_ty : public Predicate {
283 template <typename ITy> bool match(ITy *V) {
284 if (const auto *CV = dyn_cast<ConstantVal>(V))
285 return this->isValue(CV->getValue());
286 if (const auto *VTy = dyn_cast<VectorType>(V->getType())) {
287 if (const auto *C = dyn_cast<Constant>(V)) {
288 if (const auto *CV = dyn_cast_or_null<ConstantVal>(C->getSplatValue()))
289 return this->isValue(CV->getValue());
290
291 // Number of elements of a scalable vector unknown at compile time
292 auto *FVTy = dyn_cast<FixedVectorType>(VTy);
293 if (!FVTy)
294 return false;
295
296 // Non-splat vector constant: check each element for a match.
297 unsigned NumElts = FVTy->getNumElements();
298        assert(NumElts != 0 && "Constant vector with no elements?");
299 bool HasNonUndefElements = false;
300 for (unsigned i = 0; i != NumElts; ++i) {
301 Constant *Elt = C->getAggregateElement(i);
302 if (!Elt)
303 return false;
304 if (isa<UndefValue>(Elt))
305 continue;
306 auto *CV = dyn_cast<ConstantVal>(Elt);
307 if (!CV || !this->isValue(CV->getValue()))
308 return false;
309 HasNonUndefElements = true;
310 }
311 return HasNonUndefElements;
312 }
313 }
314 return false;
315 }
316};
317
318/// specialization of cstval_pred_ty for ConstantInt
319template <typename Predicate>
320using cst_pred_ty = cstval_pred_ty<Predicate, ConstantInt>;
321
322/// specialization of cstval_pred_ty for ConstantFP
323template <typename Predicate>
324using cstfp_pred_ty = cstval_pred_ty<Predicate, ConstantFP>;
325
326/// This helper class is used to match scalar and vector constants that
327/// satisfy a specified predicate, and bind them to an APInt.
328template <typename Predicate> struct api_pred_ty : public Predicate {
329 const APInt *&Res;
330
331 api_pred_ty(const APInt *&R) : Res(R) {}
332
333 template <typename ITy> bool match(ITy *V) {
334 if (const auto *CI = dyn_cast<ConstantInt>(V))
335 if (this->isValue(CI->getValue())) {
336 Res = &CI->getValue();
337 return true;
338 }
339 if (V->getType()->isVectorTy())
340 if (const auto *C = dyn_cast<Constant>(V))
341 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
342 if (this->isValue(CI->getValue())) {
343 Res = &CI->getValue();
344 return true;
345 }
346
347 return false;
348 }
349};
350
351/// This helper class is used to match scalar and vector constants that
352/// satisfy a specified predicate, and bind them to an APFloat.
353/// Undefs are allowed in splat vector constants.
354template <typename Predicate> struct apf_pred_ty : public Predicate {
355 const APFloat *&Res;
356
357 apf_pred_ty(const APFloat *&R) : Res(R) {}
358
359 template <typename ITy> bool match(ITy *V) {
360 if (const auto *CI = dyn_cast<ConstantFP>(V))
361 if (this->isValue(CI->getValue())) {
362 Res = &CI->getValue();
363 return true;
364 }
365 if (V->getType()->isVectorTy())
366 if (const auto *C = dyn_cast<Constant>(V))
367 if (auto *CI = dyn_cast_or_null<ConstantFP>(
368 C->getSplatValue(/* AllowUndef */ true)))
369 if (this->isValue(CI->getValue())) {
370 Res = &CI->getValue();
371 return true;
372 }
373
374 return false;
375 }
376};
377
378///////////////////////////////////////////////////////////////////////////////
379//
380// Encapsulate constant value queries for use in templated predicate matchers.
381// This allows checking if constants match using compound predicates and works
382// with vector constants, possibly with relaxed constraints. For example, ignore
383// undef values.
384//
385///////////////////////////////////////////////////////////////////////////////
386
387struct is_any_apint {
388 bool isValue(const APInt &C) { return true; }
389};
390/// Match an integer or vector with any integral constant.
391/// For vectors, this includes constants with undefined elements.
392inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() {
393 return cst_pred_ty<is_any_apint>();
394}
395
396struct is_all_ones {
397 bool isValue(const APInt &C) { return C.isAllOnesValue(); }
398};
399/// Match an integer or vector with all bits set.
400/// For vectors, this includes constants with undefined elements.
401inline cst_pred_ty<is_all_ones> m_AllOnes() {
402 return cst_pred_ty<is_all_ones>();
403}
404
405struct is_maxsignedvalue {
406 bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
407};
408/// Match an integer or vector with values having all bits except for the high
409/// bit set (0x7f...).
410/// For vectors, this includes constants with undefined elements.
411inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
412 return cst_pred_ty<is_maxsignedvalue>();
413}
414inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
415 return V;
416}
417
418struct is_negative {
419 bool isValue(const APInt &C) { return C.isNegative(); }
420};
421/// Match an integer or vector of negative values.
422/// For vectors, this includes constants with undefined elements.
423inline cst_pred_ty<is_negative> m_Negative() {
424 return cst_pred_ty<is_negative>();
425}
426inline api_pred_ty<is_negative> m_Negative(const APInt *&V) {
427 return V;
428}
429
430struct is_nonnegative {
431 bool isValue(const APInt &C) { return C.isNonNegative(); }
432};
433/// Match an integer or vector of non-negative values.
434/// For vectors, this includes constants with undefined elements.
435inline cst_pred_ty<is_nonnegative> m_NonNegative() {
436 return cst_pred_ty<is_nonnegative>();
437}
438inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) {
439 return V;
440}
441
442struct is_strictlypositive {
443 bool isValue(const APInt &C) { return C.isStrictlyPositive(); }
444};
445/// Match an integer or vector of strictly positive values.
446/// For vectors, this includes constants with undefined elements.
447inline cst_pred_ty<is_strictlypositive> m_StrictlyPositive() {
448 return cst_pred_ty<is_strictlypositive>();
449}
450inline api_pred_ty<is_strictlypositive> m_StrictlyPositive(const APInt *&V) {
451 return V;
452}
453
454struct is_nonpositive {
455 bool isValue(const APInt &C) { return C.isNonPositive(); }
456};
457/// Match an integer or vector of non-positive values.
458/// For vectors, this includes constants with undefined elements.
459inline cst_pred_ty<is_nonpositive> m_NonPositive() {
460 return cst_pred_ty<is_nonpositive>();
461}
462inline api_pred_ty<is_nonpositive> m_NonPositive(const APInt *&V) { return V; }
463
464struct is_one {
465 bool isValue(const APInt &C) { return C.isOneValue(); }
466};
467/// Match an integer 1 or a vector with all elements equal to 1.
468/// For vectors, this includes constants with undefined elements.
469inline cst_pred_ty<is_one> m_One() {
470 return cst_pred_ty<is_one>();
471}
472
473struct is_zero_int {
474 bool isValue(const APInt &C) { return C.isNullValue(); }
475};
476/// Match an integer 0 or a vector with all elements equal to 0.
477/// For vectors, this includes constants with undefined elements.
478inline cst_pred_ty<is_zero_int> m_ZeroInt() {
479 return cst_pred_ty<is_zero_int>();
480}
481
482struct is_zero {
483 template <typename ITy> bool match(ITy *V) {
484 auto *C = dyn_cast<Constant>(V);
485 // FIXME: this should be able to do something for scalable vectors
486 return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C));
487 }
488};
489/// Match any null constant or a vector with all elements equal to 0.
490/// For vectors, this includes constants with undefined elements.
491inline is_zero m_Zero() {
492 return is_zero();
493}
494
495struct is_power2 {
496 bool isValue(const APInt &C) { return C.isPowerOf2(); }
497};
498/// Match an integer or vector power-of-2.
499/// For vectors, this includes constants with undefined elements.
500inline cst_pred_ty<is_power2> m_Power2() {
501 return cst_pred_ty<is_power2>();
502}
503inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
504 return V;
505}
506
507struct is_negated_power2 {
508 bool isValue(const APInt &C) { return (-C).isPowerOf2(); }
509};
510/// Match an integer or vector negated power-of-2.
511/// For vectors, this includes constants with undefined elements.
512inline cst_pred_ty<is_negated_power2> m_NegatedPower2() {
513 return cst_pred_ty<is_negated_power2>();
514}
515inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) {
516 return V;
517}
518
519struct is_power2_or_zero {
520 bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
521};
522/// Match an integer or vector of 0 or power-of-2 values.
523/// For vectors, this includes constants with undefined elements.
524inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
525 return cst_pred_ty<is_power2_or_zero>();
526}
527inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
528 return V;
529}
530
531struct is_sign_mask {
532 bool isValue(const APInt &C) { return C.isSignMask(); }
533};
534/// Match an integer or vector with only the sign bit(s) set.
535/// For vectors, this includes constants with undefined elements.
536inline cst_pred_ty<is_sign_mask> m_SignMask() {
537 return cst_pred_ty<is_sign_mask>();
538}
539
540struct is_lowbit_mask {
541 bool isValue(const APInt &C) { return C.isMask(); }
542};
543/// Match an integer or vector with only the low bit(s) set.
544/// For vectors, this includes constants with undefined elements.
545inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
546 return cst_pred_ty<is_lowbit_mask>();
547}
548
549struct icmp_pred_with_threshold {
550 ICmpInst::Predicate Pred;
551 const APInt *Thr;
552 bool isValue(const APInt &C) {
553 switch (Pred) {
554 case ICmpInst::Predicate::ICMP_EQ:
555 return C.eq(*Thr);
556 case ICmpInst::Predicate::ICMP_NE:
557 return C.ne(*Thr);
558 case ICmpInst::Predicate::ICMP_UGT:
559 return C.ugt(*Thr);
560 case ICmpInst::Predicate::ICMP_UGE:
561 return C.uge(*Thr);
562 case ICmpInst::Predicate::ICMP_ULT:
563 return C.ult(*Thr);
564 case ICmpInst::Predicate::ICMP_ULE:
565 return C.ule(*Thr);
566 case ICmpInst::Predicate::ICMP_SGT:
567 return C.sgt(*Thr);
568 case ICmpInst::Predicate::ICMP_SGE:
569 return C.sge(*Thr);
570 case ICmpInst::Predicate::ICMP_SLT:
571 return C.slt(*Thr);
572 case ICmpInst::Predicate::ICMP_SLE:
573 return C.sle(*Thr);
574 default:
575      llvm_unreachable("Unhandled ICmp predicate");
576 }
577 }
578};
579/// Match an integer or vector with every element comparing 'pred' (eq/ne/...)
580/// to Threshold. For vectors, this includes constants with undefined elements.
581inline cst_pred_ty<icmp_pred_with_threshold>
582m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold) {
583 cst_pred_ty<icmp_pred_with_threshold> P;
584 P.Pred = Predicate;
585 P.Thr = &Threshold;
586 return P;
587}
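
Usage sketch, mirroring the BitWidthSplat lambda in the InstCombine code earlier in this report (assumes Constant *C and Value *V in scope): check that every element of C equals the scalar bitwidth of V.

    bool IsBitWidthSplat = match(
        C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
                              APInt(C->getType()->getScalarSizeInBits(),
                                    V->getType()->getScalarSizeInBits())));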
588
589struct is_nan {
590 bool isValue(const APFloat &C) { return C.isNaN(); }
591};
592/// Match an arbitrary NaN constant. This includes quiet and signaling NaNs.
593/// For vectors, this includes constants with undefined elements.
594inline cstfp_pred_ty<is_nan> m_NaN() {
595 return cstfp_pred_ty<is_nan>();
596}
597
598struct is_nonnan {
599 bool isValue(const APFloat &C) { return !C.isNaN(); }
600};
601/// Match a non-NaN FP constant.
602/// For vectors, this includes constants with undefined elements.
603inline cstfp_pred_ty<is_nonnan> m_NonNaN() {
604 return cstfp_pred_ty<is_nonnan>();
605}
606
607struct is_inf {
608 bool isValue(const APFloat &C) { return C.isInfinity(); }
609};
610/// Match a positive or negative infinity FP constant.
611/// For vectors, this includes constants with undefined elements.
612inline cstfp_pred_ty<is_inf> m_Inf() {
613 return cstfp_pred_ty<is_inf>();
614}
615
616struct is_noninf {
617 bool isValue(const APFloat &C) { return !C.isInfinity(); }
618};
619/// Match a non-infinity FP constant, i.e. finite or NaN.
620/// For vectors, this includes constants with undefined elements.
621inline cstfp_pred_ty<is_noninf> m_NonInf() {
622 return cstfp_pred_ty<is_noninf>();
623}
624
625struct is_finite {
626 bool isValue(const APFloat &C) { return C.isFinite(); }
627};
628/// Match a finite FP constant, i.e. not infinity or NaN.
629/// For vectors, this includes constants with undefined elements.
630inline cstfp_pred_ty<is_finite> m_Finite() {
631 return cstfp_pred_ty<is_finite>();
632}
633inline apf_pred_ty<is_finite> m_Finite(const APFloat *&V) { return V; }
634
635struct is_finitenonzero {
636 bool isValue(const APFloat &C) { return C.isFiniteNonZero(); }
637};
638/// Match a finite non-zero FP constant.
639/// For vectors, this includes constants with undefined elements.
640inline cstfp_pred_ty<is_finitenonzero> m_FiniteNonZero() {
641 return cstfp_pred_ty<is_finitenonzero>();
642}
643inline apf_pred_ty<is_finitenonzero> m_FiniteNonZero(const APFloat *&V) {
644 return V;
645}
646
647struct is_any_zero_fp {
648 bool isValue(const APFloat &C) { return C.isZero(); }
649};
650/// Match a floating-point negative zero or positive zero.
651/// For vectors, this includes constants with undefined elements.
652inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() {
653 return cstfp_pred_ty<is_any_zero_fp>();
654}
655
656struct is_pos_zero_fp {
657 bool isValue(const APFloat &C) { return C.isPosZero(); }
658};
659/// Match a floating-point positive zero.
660/// For vectors, this includes constants with undefined elements.
661inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() {
662 return cstfp_pred_ty<is_pos_zero_fp>();
663}
664
665struct is_neg_zero_fp {
666 bool isValue(const APFloat &C) { return C.isNegZero(); }
667};
668/// Match a floating-point negative zero.
669/// For vectors, this includes constants with undefined elements.
670inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() {
671 return cstfp_pred_ty<is_neg_zero_fp>();
672}
673
674struct is_non_zero_fp {
675 bool isValue(const APFloat &C) { return C.isNonZero(); }
676};
677/// Match a floating-point non-zero.
678/// For vectors, this includes constants with undefined elements.
679inline cstfp_pred_ty<is_non_zero_fp> m_NonZeroFP() {
680 return cstfp_pred_ty<is_non_zero_fp>();
681}
682
683///////////////////////////////////////////////////////////////////////////////
684
685template <typename Class> struct bind_ty {
686 Class *&VR;
687
688 bind_ty(Class *&V) : VR(V) {}
689
690 template <typename ITy> bool match(ITy *V) {
691 if (auto *CV = dyn_cast<Class>(V)) {
692 VR = CV;
693 return true;
694 }
695 return false;
696 }
697};
698
699/// Match a value, capturing it if we match.
700inline bind_ty<Value> m_Value(Value *&V) { return V; }
Step 12: Calling constructor for 'bind_ty<llvm::Value>'
Step 13: Returning from constructor for 'bind_ty<llvm::Value>'
Step 14: Returning without writing to 'V'
701inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
702
703/// Match an instruction, capturing it if we match.
704inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
705/// Match a unary operator, capturing it if we match.
706inline bind_ty<UnaryOperator> m_UnOp(UnaryOperator *&I) { return I; }
707/// Match a binary operator, capturing it if we match.
708inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
709/// Match a with overflow intrinsic, capturing it if we match.
710inline bind_ty<WithOverflowInst> m_WithOverflowInst(WithOverflowInst *&I) { return I; }
711inline bind_ty<const WithOverflowInst>
712m_WithOverflowInst(const WithOverflowInst *&I) {
713 return I;
714}
715
716/// Match a Constant, capturing the value if we match.
717inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
718
719/// Match a ConstantInt, capturing the value if we match.
720inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
721
722/// Match a ConstantFP, capturing the value if we match.
723inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
724
725/// Match a ConstantExpr, capturing the value if we match.
726inline bind_ty<ConstantExpr> m_ConstantExpr(ConstantExpr *&C) { return C; }
727
728/// Match a basic block value, capturing it if we match.
729inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; }
730inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) {
731 return V;
732}
733
734/// Match an arbitrary immediate Constant and ignore it.
735inline match_combine_and<class_match<Constant>,
736 match_unless<class_match<ConstantExpr>>>
737m_ImmConstant() {
738 return m_CombineAnd(m_Constant(), m_Unless(m_ConstantExpr()));
739}
740
741/// Match an immediate Constant, capturing the value if we match.
742inline match_combine_and<bind_ty<Constant>,
743 match_unless<class_match<ConstantExpr>>>
744m_ImmConstant(Constant *&C) {
745 return m_CombineAnd(m_Constant(C), m_Unless(m_ConstantExpr()));
746}
747
748/// Match a specified Value*.
749struct specificval_ty {
750 const Value *Val;
751
752 specificval_ty(const Value *V) : Val(V) {}
753
754 template <typename ITy> bool match(ITy *V) { return V == Val; }
755};
756
757/// Match if we have a specific specified value.
758inline specificval_ty m_Specific(const Value *V) { return V; }
759
760/// Stores a reference to the Value *, not the Value * itself,
761/// thus can be used in commutative matchers.
762template <typename Class> struct deferredval_ty {
763 Class *const &Val;
764
765 deferredval_ty(Class *const &V) : Val(V) {}
766
767 template <typename ITy> bool match(ITy *const V) { return V == Val; }
768};
769
770/// A commutative-friendly version of m_Specific().
771inline deferredval_ty<Value> m_Deferred(Value *const &V) { return V; }
772inline deferredval_ty<const Value> m_Deferred(const Value *const &V) {
773 return V;
774}
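
The practical difference (hand-written sketch): m_Specific snapshots the pointer when the pattern is constructed, while m_Deferred reads it through a reference at match time, so it can observe a binding made earlier in the same pattern.

    Value *X;
    // Matches 'xor %a, %a': m_Value(X) binds operand 0 first, then
    // m_Deferred(X) compares operand 1 against that fresh binding.
    bool IsSelfXor = match(V, m_Xor(m_Value(X), m_Deferred(X)));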
775
776/// Match a specified floating point value or vector of all elements of
777/// that value.
778struct specific_fpval {
779 double Val;
780
781 specific_fpval(double V) : Val(V) {}
782
783 template <typename ITy> bool match(ITy *V) {
784 if (const auto *CFP = dyn_cast<ConstantFP>(V))
785 return CFP->isExactlyValue(Val);
786 if (V->getType()->isVectorTy())
787 if (const auto *C = dyn_cast<Constant>(V))
788 if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
789 return CFP->isExactlyValue(Val);
790 return false;
791 }
792};
793
794/// Match a specific floating point value or vector with all elements
795/// equal to the value.
796inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
797
798/// Match a float 1.0 or vector with all elements equal to 1.0.
799inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
800
801struct bind_const_intval_ty {
802 uint64_t &VR;
803
804 bind_const_intval_ty(uint64_t &V) : VR(V) {}
805
806 template <typename ITy> bool match(ITy *V) {
807 if (const auto *CV = dyn_cast<ConstantInt>(V))
808      if (CV->getValue().ule(UINT64_MAX)) {
809 VR = CV->getZExtValue();
810 return true;
811 }
812 return false;
813 }
814};
815
816/// Match a specified integer value or vector of all elements of that
817/// value.
818template <bool AllowUndefs>
819struct specific_intval {
820 APInt Val;
821
822 specific_intval(APInt V) : Val(std::move(V)) {}
823
824 template <typename ITy> bool match(ITy *V) {
825 const auto *CI = dyn_cast<ConstantInt>(V);
826 if (!CI && V->getType()->isVectorTy())
827 if (const auto *C = dyn_cast<Constant>(V))
828 CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndefs));
829
830 return CI && APInt::isSameValue(CI->getValue(), Val);
831 }
832};
833
834/// Match a specific integer value or vector with all elements equal to
835/// the value.
836inline specific_intval<false> m_SpecificInt(APInt V) {
837 return specific_intval<false>(std::move(V));
838}
839
840inline specific_intval<false> m_SpecificInt(uint64_t V) {
841 return m_SpecificInt(APInt(64, V));
842}
843
844inline specific_intval<true> m_SpecificIntAllowUndef(APInt V) {
845 return specific_intval<true>(std::move(V));
846}
847
848inline specific_intval<true> m_SpecificIntAllowUndef(uint64_t V) {
849 return m_SpecificIntAllowUndef(APInt(64, V));
850}
851
852/// Match a ConstantInt and bind to its value. This does not match
853/// ConstantInts wider than 64-bits.
854inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
855
856/// Match a specified basic block value.
857struct specific_bbval {
858 BasicBlock *Val;
859
860 specific_bbval(BasicBlock *Val) : Val(Val) {}
861
862 template <typename ITy> bool match(ITy *V) {
863 const auto *BB = dyn_cast<BasicBlock>(V);
864 return BB && BB == Val;
865 }
866};
867
868/// Match a specific basic block value.
869inline specific_bbval m_SpecificBB(BasicBlock *BB) {
870 return specific_bbval(BB);
871}
872
873/// A commutative-friendly version of m_Specific().
874inline deferredval_ty<BasicBlock> m_Deferred(BasicBlock *const &BB) {
875 return BB;
876}
877inline deferredval_ty<const BasicBlock>
878m_Deferred(const BasicBlock *const &BB) {
879 return BB;
880}
881
882//===----------------------------------------------------------------------===//
883// Matcher for any binary operator.
884//
885template <typename LHS_t, typename RHS_t, bool Commutable = false>
886struct AnyBinaryOp_match {
887 LHS_t L;
888 RHS_t R;
889
890 // The evaluation order is always stable, regardless of Commutability.
891 // The LHS is always matched first.
892 AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
893
894 template <typename OpTy> bool match(OpTy *V) {
895 if (auto *I = dyn_cast<BinaryOperator>(V))
896 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
897 (Commutable && L.match(I->getOperand(1)) &&
898 R.match(I->getOperand(0)));
899 return false;
900 }
901};
902
903template <typename LHS, typename RHS>
904inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
905 return AnyBinaryOp_match<LHS, RHS>(L, R);
906}
907
908//===----------------------------------------------------------------------===//
909// Matcher for any unary operator.
910// TODO fuse unary, binary matcher into n-ary matcher
911//
912template <typename OP_t> struct AnyUnaryOp_match {
913 OP_t X;
914
915 AnyUnaryOp_match(const OP_t &X) : X(X) {}
916
917 template <typename OpTy> bool match(OpTy *V) {
918 if (auto *I = dyn_cast<UnaryOperator>(V))
919 return X.match(I->getOperand(0));
920 return false;
921 }
922};
923
924template <typename OP_t> inline AnyUnaryOp_match<OP_t> m_UnOp(const OP_t &X) {
925 return AnyUnaryOp_match<OP_t>(X);
926}
927
928//===----------------------------------------------------------------------===//
929// Matchers for specific binary operators.
930//
931
932template <typename LHS_t, typename RHS_t, unsigned Opcode,
933 bool Commutable = false>
934struct BinaryOp_match {
935 LHS_t L;
936 RHS_t R;
937
938 // The evaluation order is always stable, regardless of Commutability.
939 // The LHS is always matched first.
940 BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
941
942 template <typename OpTy> bool match(OpTy *V) {
943 if (V->getValueID() == Value::InstructionVal + Opcode) {
Step 32: Assuming the condition is true
Step 33: Taking true branch
944 auto *I = cast<BinaryOperator>(V);
Step 34: 'V' is a 'BinaryOperator'
945 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
Step 35: Returning the value 1, which participates in a condition later
946 (Commutable && L.match(I->getOperand(1)) &&
947 R.match(I->getOperand(0)));
948 }
949 if (auto *CE = dyn_cast<ConstantExpr>(V))
950 return CE->getOpcode() == Opcode &&
951 ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
952 (Commutable && L.match(CE->getOperand(1)) &&
953 R.match(CE->getOperand(0))));
954 return false;
955 }
956};
957
958template <typename LHS, typename RHS>
959inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
960 const RHS &R) {
961 return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
962}
963
964template <typename LHS, typename RHS>
965inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
966 const RHS &R) {
967 return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
968}
969
970template <typename LHS, typename RHS>
971inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
972 const RHS &R) {
973 return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
974}
975
976template <typename LHS, typename RHS>
977inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
978 const RHS &R) {
979 return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
980}
981
982template <typename Op_t> struct FNeg_match {
983 Op_t X;
984
985 FNeg_match(const Op_t &Op) : X(Op) {}
986 template <typename OpTy> bool match(OpTy *V) {
987 auto *FPMO = dyn_cast<FPMathOperator>(V);
988 if (!FPMO) return false;
989
990 if (FPMO->getOpcode() == Instruction::FNeg)
991 return X.match(FPMO->getOperand(0));
992
993 if (FPMO->getOpcode() == Instruction::FSub) {
994 if (FPMO->hasNoSignedZeros()) {
995 // With 'nsz', any zero goes.
996 if (!cstfp_pred_ty<is_any_zero_fp>().match(FPMO->getOperand(0)))
997 return false;
998 } else {
999 // Without 'nsz', we need fsub -0.0, X exactly.
1000 if (!cstfp_pred_ty<is_neg_zero_fp>().match(FPMO->getOperand(0)))
1001 return false;
1002 }
1003
1004 return X.match(FPMO->getOperand(1));
1005 }
1006
1007 return false;
1008 }
1009};
1010
1011/// Match 'fneg X' as 'fsub -0.0, X'.
1012template <typename OpTy>
1013inline FNeg_match<OpTy>
1014m_FNeg(const OpTy &X) {
1015 return FNeg_match<OpTy>(X);
1016}
1017
1018/// Match 'fneg X' as 'fsub +-0.0, X'.
1019template <typename RHS>
1020inline BinaryOp_match<cstfp_pred_ty<is_any_zero_fp>, RHS, Instruction::FSub>
1021m_FNegNSZ(const RHS &X) {
1022 return m_FSub(m_AnyZeroFP(), X);
1023}
1024
1025template <typename LHS, typename RHS>
1026inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
1027 const RHS &R) {
1028 return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
1029}
1030
1031template <typename LHS, typename RHS>
1032inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
1033 const RHS &R) {
1034 return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
1035}
1036
1037template <typename LHS, typename RHS>
1038inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
1039 const RHS &R) {
1040 return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
1041}
1042
1043template <typename LHS, typename RHS>
1044inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
1045 const RHS &R) {
1046 return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
1047}
1048
1049template <typename LHS, typename RHS>
1050inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
1051 const RHS &R) {
1052 return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
1053}
1054
1055template <typename LHS, typename RHS>
1056inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
1057 const RHS &R) {
1058 return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
1059}
1060
1061template <typename LHS, typename RHS>
1062inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
1063 const RHS &R) {
1064 return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
1065}
1066
1067template <typename LHS, typename RHS>
1068inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
1069 const RHS &R) {
1070 return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
1071}
1072
1073template <typename LHS, typename RHS>
1074inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
1075 const RHS &R) {
1076 return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
1077}
1078
1079template <typename LHS, typename RHS>
1080inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
1081 const RHS &R) {
1082 return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
1083}
1084
1085template <typename LHS, typename RHS>
1086inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
1087 const RHS &R) {
1088 return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
1089}
1090
1091template <typename LHS, typename RHS>
1092inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
1093 const RHS &R) {
1094 return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
1095}
1096
1097template <typename LHS, typename RHS>
1098inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
1099 const RHS &R) {
1100 return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
1101}
1102
1103template <typename LHS, typename RHS>
1104inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
1105 const RHS &R) {
1106 return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
1107}
1108
1109template <typename LHS_t, typename RHS_t, unsigned Opcode,
1110 unsigned WrapFlags = 0>
1111struct OverflowingBinaryOp_match {
1112 LHS_t L;
1113 RHS_t R;
1114
1115 OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
1116 : L(LHS), R(RHS) {}
1117
1118 template <typename OpTy> bool match(OpTy *V) {
1119 if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
1120 if (Op->getOpcode() != Opcode)
1121 return false;
1122 if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
1123 !Op->hasNoUnsignedWrap())
1124 return false;
1125 if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
1126 !Op->hasNoSignedWrap())
1127 return false;
1128 return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
1129 }
1130 return false;
1131 }
1132};
1133
1134template <typename LHS, typename RHS>
1135inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1136 OverflowingBinaryOperator::NoSignedWrap>
1137m_NSWAdd(const LHS &L, const RHS &R) {
1138 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1139 OverflowingBinaryOperator::NoSignedWrap>(
1140 L, R);
1141}
1142template <typename LHS, typename RHS>
1143inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1144 OverflowingBinaryOperator::NoSignedWrap>
1145m_NSWSub(const LHS &L, const RHS &R) {
1146 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1147 OverflowingBinaryOperator::NoSignedWrap>(
1148 L, R);
1149}
1150template <typename LHS, typename RHS>
1151inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1152 OverflowingBinaryOperator::NoSignedWrap>
1153m_NSWMul(const LHS &L, const RHS &R) {
1154 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1155 OverflowingBinaryOperator::NoSignedWrap>(
1156 L, R);
1157}
1158template <typename LHS, typename RHS>
1159inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1160 OverflowingBinaryOperator::NoSignedWrap>
1161m_NSWShl(const LHS &L, const RHS &R) {
1162 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1163 OverflowingBinaryOperator::NoSignedWrap>(
1164 L, R);
1165}
1166
1167template <typename LHS, typename RHS>
1168inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1169 OverflowingBinaryOperator::NoUnsignedWrap>
1170m_NUWAdd(const LHS &L, const RHS &R) {
1171 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1172 OverflowingBinaryOperator::NoUnsignedWrap>(
1173 L, R);
1174}
1175template <typename LHS, typename RHS>
1176inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1177 OverflowingBinaryOperator::NoUnsignedWrap>
1178m_NUWSub(const LHS &L, const RHS &R) {
1179 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1180 OverflowingBinaryOperator::NoUnsignedWrap>(
1181 L, R);
1182}
1183template <typename LHS, typename RHS>
1184inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1185 OverflowingBinaryOperator::NoUnsignedWrap>
1186m_NUWMul(const LHS &L, const RHS &R) {
1187 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1188 OverflowingBinaryOperator::NoUnsignedWrap>(
1189 L, R);
1190}
1191template <typename LHS, typename RHS>
1192inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1193 OverflowingBinaryOperator::NoUnsignedWrap>
1194m_NUWShl(const LHS &L, const RHS &R) {
1195 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1196 OverflowingBinaryOperator::NoUnsignedWrap>(
1197 L, R);
1198}
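
Usage sketch: these matchers succeed only when the instruction actually carries the wrap flag, e.g. the 'X -nsw Y' test behind the sign-bit folds earlier in this report.

    Value *X, *Y;
    // Fails on a plain 'sub'; requires the nsw flag on the instruction.
    bool IsNSWSub = match(V, m_NSWSub(m_Value(X), m_Value(Y)));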
1199
1200//===----------------------------------------------------------------------===//
1201// Class that matches a group of binary opcodes.
1202//
1203template <typename LHS_t, typename RHS_t, typename Predicate>
1204struct BinOpPred_match : Predicate {
1205 LHS_t L;
1206 RHS_t R;
1207
1208 BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1209
1210 template <typename OpTy> bool match(OpTy *V) {
1211 if (auto *I = dyn_cast<Instruction>(V))
1212 return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
1213 R.match(I->getOperand(1));
1214 if (auto *CE = dyn_cast<ConstantExpr>(V))
1215 return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
1216 R.match(CE->getOperand(1));
1217 return false;
1218 }
1219};
1220
1221struct is_shift_op {
1222 bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
1223};
1224
1225struct is_right_shift_op {
1226 bool isOpType(unsigned Opcode) {
1227 return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
1228 }
1229};
1230
1231struct is_logical_shift_op {
1232 bool isOpType(unsigned Opcode) {
1233 return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
1234 }
1235};
1236
1237struct is_bitwiselogic_op {
1238 bool isOpType(unsigned Opcode) {
1239 return Instruction::isBitwiseLogicOp(Opcode);
1240 }
1241};
1242
1243struct is_idiv_op {
1244 bool isOpType(unsigned Opcode) {
1245 return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
1246 }
1247};
1248
1249struct is_irem_op {
1250 bool isOpType(unsigned Opcode) {
1251 return Opcode == Instruction::SRem || Opcode == Instruction::URem;
1252 }
1253};
1254
1255/// Matches shift operations.
1256template <typename LHS, typename RHS>
1257inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
1258 const RHS &R) {
1259 return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
Step 26: Returning without writing to 'R.R.VR'
1260}
1261
1262/// Matches right shift operations (lshr and ashr).
1263template <typename LHS, typename RHS>
1264inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
1265 const RHS &R) {
1266 return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
1267}
1268
1269/// Matches logical shift operations.
1270template <typename LHS, typename RHS>
1271inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
1272m_LogicalShift(const LHS &L, const RHS &R) {
1273 return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
1274}
1275
1276/// Matches bitwise logic operations.
1277template <typename LHS, typename RHS>
1278inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
1279m_BitwiseLogic(const LHS &L, const RHS &R) {
1280 return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
1281}
1282
1283/// Matches integer division operations.
1284template <typename LHS, typename RHS>
1285inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
1286 const RHS &R) {
1287 return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
1288}
1289
1290/// Matches integer remainder operations.
1291template <typename LHS, typename RHS>
1292inline BinOpPred_match<LHS, RHS, is_irem_op> m_IRem(const LHS &L,
1293 const RHS &R) {
1294 return BinOpPred_match<LHS, RHS, is_irem_op>(L, R);
1295}
1296
1297//===----------------------------------------------------------------------===//
1298// Class that matches exact binary ops.
1299//
1300template <typename SubPattern_t> struct Exact_match {
1301 SubPattern_t SubPattern;
1302
1303 Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
1304
1305 template <typename OpTy> bool match(OpTy *V) {
1306 if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
1307 return PEO->isExact() && SubPattern.match(V);
1308 return false;
1309 }
1310};
1311
1312template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
1313 return SubPattern;
1314}
1315
1316//===----------------------------------------------------------------------===//
1317// Matchers for CmpInst classes
1318//
1319
1320template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
1321 bool Commutable = false>
1322struct CmpClass_match {
1323 PredicateTy &Predicate;
1324 LHS_t L;
1325 RHS_t R;
1326
1327 // The evaluation order is always stable, regardless of Commutability.
1328 // The LHS is always matched first.
1329 CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
1330 : Predicate(Pred), L(LHS), R(RHS) {}
1331
1332 template <typename OpTy> bool match(OpTy *V) {
1333 if (auto *I = dyn_cast<Class>(V)) {
1334 if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
1335 Predicate = I->getPredicate();
1336 return true;
1337 } else if (Commutable && L.match(I->getOperand(1)) &&
1338 R.match(I->getOperand(0))) {
1339 Predicate = I->getSwappedPredicate();
1340 return true;
1341 }
1342 }
1343 return false;
1344 }
1345};
1346
1347template <typename LHS, typename RHS>
1348inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
1349m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1350 return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
1351}
1352
1353template <typename LHS, typename RHS>
1354inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
1355m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1356 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
1357}
1358
1359template <typename LHS, typename RHS>
1360inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
1361m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1362 return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
1363}
1364
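// Example (illustrative sketch; hypothetical helper): Pred is written through
// the reference only when the whole pattern matches, so callers must check
// match()'s result before reading it -- exactly the property behind the
// "uninitialized value" path traced in this report.
static bool matchICmpConstRHSExample(llvm::Value *V,
                                     llvm::ICmpInst::Predicate &Pred,
                                     llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  const llvm::APInt *C;
  return match(V, m_ICmp(Pred, m_Value(X), m_APInt(C)));
}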
1365//===----------------------------------------------------------------------===//
1366// Matchers for instructions with a given opcode and number of operands.
1367//
1368
1369/// Matches instructions with Opcode and one operand.
1370template <typename T0, unsigned Opcode> struct OneOps_match {
1371 T0 Op1;
1372
1373 OneOps_match(const T0 &Op1) : Op1(Op1) {}
1374
1375 template <typename OpTy> bool match(OpTy *V) {
1376 if (V->getValueID() == Value::InstructionVal + Opcode) {
1377 auto *I = cast<Instruction>(V);
1378 return Op1.match(I->getOperand(0));
1379 }
1380 return false;
1381 }
1382};
1383
1384/// Matches instructions with Opcode and two operands.
1385template <typename T0, typename T1, unsigned Opcode> struct TwoOps_match {
1386 T0 Op1;
1387 T1 Op2;
1388
1389 TwoOps_match(const T0 &Op1, const T1 &Op2) : Op1(Op1), Op2(Op2) {}
1390
1391 template <typename OpTy> bool match(OpTy *V) {
1392 if (V->getValueID() == Value::InstructionVal + Opcode) {
1393 auto *I = cast<Instruction>(V);
1394 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1));
1395 }
1396 return false;
1397 }
1398};
1399
1400/// Matches instructions with Opcode and three operands.
1401template <typename T0, typename T1, typename T2, unsigned Opcode>
1402struct ThreeOps_match {
1403 T0 Op1;
1404 T1 Op2;
1405 T2 Op3;
1406
1407 ThreeOps_match(const T0 &Op1, const T1 &Op2, const T2 &Op3)
1408 : Op1(Op1), Op2(Op2), Op3(Op3) {}
1409
1410 template <typename OpTy> bool match(OpTy *V) {
1411 if (V->getValueID() == Value::InstructionVal + Opcode) {
1412 auto *I = cast<Instruction>(V);
1413 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
1414 Op3.match(I->getOperand(2));
1415 }
1416 return false;
1417 }
1418};
1419
1420/// Matches SelectInst.
1421template <typename Cond, typename LHS, typename RHS>
1422inline ThreeOps_match<Cond, LHS, RHS, Instruction::Select>
1423m_Select(const Cond &C, const LHS &L, const RHS &R) {
1424 return ThreeOps_match<Cond, LHS, RHS, Instruction::Select>(C, L, R);
1425}
1426
1427/// This matches a select of two constants, e.g.:
1428/// m_SelectCst<-1, 0>(m_Value(V))
1429template <int64_t L, int64_t R, typename Cond>
1430inline ThreeOps_match<Cond, constantint_match<L>, constantint_match<R>,
1431 Instruction::Select>
1432m_SelectCst(const Cond &C) {
1433 return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
1434}
1435
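// Example (illustrative sketch; hypothetical helper): decomposing a select
// into its condition and arms.
static bool matchSelectExample(llvm::Value *V, llvm::Value *&Cond,
                               llvm::Value *&TVal, llvm::Value *&FVal) {
  using namespace llvm::PatternMatch;
  return match(V, m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)));
}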
1436/// Matches FreezeInst.
1437template <typename OpTy>
1438inline OneOps_match<OpTy, Instruction::Freeze> m_Freeze(const OpTy &Op) {
1439 return OneOps_match<OpTy, Instruction::Freeze>(Op);
1440}
1441
1442/// Matches InsertElementInst.
1443template <typename Val_t, typename Elt_t, typename Idx_t>
1444inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
1445m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
1446 return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
1447 Val, Elt, Idx);
1448}
1449
1450/// Matches ExtractElementInst.
1451template <typename Val_t, typename Idx_t>
1452inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
1453m_ExtractElt(const Val_t &Val, const Idx_t &Idx) {
1454 return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
1455}
1456
1457/// Matches shuffle.
1458template <typename T0, typename T1, typename T2> struct Shuffle_match {
1459 T0 Op1;
1460 T1 Op2;
1461 T2 Mask;
1462
1463 Shuffle_match(const T0 &Op1, const T1 &Op2, const T2 &Mask)
1464 : Op1(Op1), Op2(Op2), Mask(Mask) {}
1465
1466 template <typename OpTy> bool match(OpTy *V) {
1467 if (auto *I = dyn_cast<ShuffleVectorInst>(V)) {
1468 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
1469 Mask.match(I->getShuffleMask());
1470 }
1471 return false;
1472 }
1473};
1474
1475struct m_Mask {
1476 ArrayRef<int> &MaskRef;
1477 m_Mask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
1478 bool match(ArrayRef<int> Mask) {
1479 MaskRef = Mask;
1480 return true;
1481 }
1482};
1483
1484struct m_ZeroMask {
1485 bool match(ArrayRef<int> Mask) {
1486 return all_of(Mask, [](int Elem) { return Elem == 0 || Elem == -1; });
1487 }
1488};
1489
1490struct m_SpecificMask {
1491 ArrayRef<int> &MaskRef;
1492 m_SpecificMask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
1493 bool match(ArrayRef<int> Mask) { return MaskRef == Mask; }
1494};
1495
1496struct m_SplatOrUndefMask {
1497 int &SplatIndex;
1498 m_SplatOrUndefMask(int &SplatIndex) : SplatIndex(SplatIndex) {}
1499 bool match(ArrayRef<int> Mask) {
1500 auto First = find_if(Mask, [](int Elem) { return Elem != -1; });
1501 if (First == Mask.end())
1502 return false;
1503 SplatIndex = *First;
1504 return all_of(Mask,
1505 [First](int Elem) { return Elem == *First || Elem == -1; });
1506 }
1507};
1508
1509/// Matches ShuffleVectorInst independently of mask value.
1510template <typename V1_t, typename V2_t>
1511inline TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>
1512m_Shuffle(const V1_t &v1, const V2_t &v2) {
1513 return TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>(v1, v2);
1514}
1515
1516template <typename V1_t, typename V2_t, typename Mask_t>
1517inline Shuffle_match<V1_t, V2_t, Mask_t>
1518m_Shuffle(const V1_t &v1, const V2_t &v2, const Mask_t &mask) {
1519 return Shuffle_match<V1_t, V2_t, Mask_t>(v1, v2, mask);
1520}
1521
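// Example (illustrative sketch; hypothetical helper): a splat of lane 0 is a
// shuffle whose mask elements are all 0 or undef (-1).
static bool matchSplatOfLane0Example(llvm::Value *V, llvm::Value *&Src) {
  using namespace llvm::PatternMatch;
  return match(V, m_Shuffle(m_Value(Src), m_Undef(), m_ZeroMask()));
}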
1522/// Matches LoadInst.
1523template <typename OpTy>
1524inline OneOps_match<OpTy, Instruction::Load> m_Load(const OpTy &Op) {
1525 return OneOps_match<OpTy, Instruction::Load>(Op);
1526}
1527
1528/// Matches StoreInst.
1529template <typename ValueOpTy, typename PointerOpTy>
1530inline TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>
1531m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) {
1532 return TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>(ValueOp,
1533 PointerOp);
1534}
1535
1536//===----------------------------------------------------------------------===//
1537// Matchers for CastInst classes
1538//
1539
1540template <typename Op_t, unsigned Opcode> struct CastClass_match {
1541 Op_t Op;
1542
1543 CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
1544
1545 template <typename OpTy> bool match(OpTy *V) {
1546 if (auto *O = dyn_cast<Operator>(V))
1547 return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
1548 return false;
1549 }
1550};
1551
1552/// Matches BitCast.
1553template <typename OpTy>
1554inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
1555 return CastClass_match<OpTy, Instruction::BitCast>(Op);
1556}
1557
1558/// Matches PtrToInt.
1559template <typename OpTy>
1560inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
1561 return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
1562}
1563
1564/// Matches IntToPtr.
1565template <typename OpTy>
1566inline CastClass_match<OpTy, Instruction::IntToPtr> m_IntToPtr(const OpTy &Op) {
1567 return CastClass_match<OpTy, Instruction::IntToPtr>(Op);
1568}
1569
1570/// Matches Trunc.
1571template <typename OpTy>
1572inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
1573 return CastClass_match<OpTy, Instruction::Trunc>(Op);
1574}
1575
1576template <typename OpTy>
1577inline match_combine_or<CastClass_match<OpTy, Instruction::Trunc>, OpTy>
1578m_TruncOrSelf(const OpTy &Op) {
1579 return m_CombineOr(m_Trunc(Op), Op);
1580}
1581
1582/// Matches SExt.
1583template <typename OpTy>
1584inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
1585 return CastClass_match<OpTy, Instruction::SExt>(Op);
1586}
1587
1588/// Matches ZExt.
1589template <typename OpTy>
1590inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
1591 return CastClass_match<OpTy, Instruction::ZExt>(Op);
18. Returning without writing to 'Op.VR'
1592}
1593
1594template <typename OpTy>
1595inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, OpTy>
1596m_ZExtOrSelf(const OpTy &Op) {
1597 return m_CombineOr(m_ZExt(Op), Op);
17. Calling 'm_ZExt<llvm::PatternMatch::bind_ty<llvm::Value>>'
19. Returning from 'm_ZExt<llvm::PatternMatch::bind_ty<llvm::Value>>'
20. Calling 'm_CombineOr<llvm::PatternMatch::CastClass_match<llvm::PatternMatch::bind_ty<llvm::Value>, 39>, llvm::PatternMatch::bind_ty<llvm::Value>>'
22. Returning from 'm_CombineOr<llvm::PatternMatch::CastClass_match<llvm::PatternMatch::bind_ty<llvm::Value>, 39>, llvm::PatternMatch::bind_ty<llvm::Value>>'
23. Returning without writing to 'Op.VR'
1598}
1599
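// Example (illustrative sketch; hypothetical helper): with an "...OrSelf"
// matcher the bound value is V itself when no zext is present. As the steps
// above note, m_ZExtOrSelf can return without writing to 'Op.VR', so X is
// only meaningful after match() returns true; it is pre-initialized here to
// keep the failure path well-defined.
static llvm::Value *stripZExtExample(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X = nullptr;
  if (match(V, m_ZExtOrSelf(m_Value(X))))
    return X;
  return nullptr;
}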
1600template <typename OpTy>
1601inline match_combine_or<CastClass_match<OpTy, Instruction::SExt>, OpTy>
1602m_SExtOrSelf(const OpTy &Op) {
1603 return m_CombineOr(m_SExt(Op), Op);
1604}
1605
1606template <typename OpTy>
1607inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
1608 CastClass_match<OpTy, Instruction::SExt>>
1609m_ZExtOrSExt(const OpTy &Op) {
1610 return m_CombineOr(m_ZExt(Op), m_SExt(Op));
1611}
1612
1613template <typename OpTy>
1614inline match_combine_or<
1615 match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
1616 CastClass_match<OpTy, Instruction::SExt>>,
1617 OpTy>
1618m_ZExtOrSExtOrSelf(const OpTy &Op) {
1619 return m_CombineOr(m_ZExtOrSExt(Op), Op);
1620}
1621
1622template <typename OpTy>
1623inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
1624 return CastClass_match<OpTy, Instruction::UIToFP>(Op);
1625}
1626
1627template <typename OpTy>
1628inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
1629 return CastClass_match<OpTy, Instruction::SIToFP>(Op);
1630}
1631
1632template <typename OpTy>
1633inline CastClass_match<OpTy, Instruction::FPToUI> m_FPToUI(const OpTy &Op) {
1634 return CastClass_match<OpTy, Instruction::FPToUI>(Op);
1635}
1636
1637template <typename OpTy>
1638inline CastClass_match<OpTy, Instruction::FPToSI> m_FPToSI(const OpTy &Op) {
1639 return CastClass_match<OpTy, Instruction::FPToSI>(Op);
1640}
1641
1642template <typename OpTy>
1643inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
1644 return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
1645}
1646
1647template <typename OpTy>
1648inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
1649 return CastClass_match<OpTy, Instruction::FPExt>(Op);
1650}
1651
1652//===----------------------------------------------------------------------===//
1653// Matchers for control flow.
1654//
1655
1656struct br_match {
1657 BasicBlock *&Succ;
1658
1659 br_match(BasicBlock *&Succ) : Succ(Succ) {}
1660
1661 template <typename OpTy> bool match(OpTy *V) {
1662 if (auto *BI = dyn_cast<BranchInst>(V))
1663 if (BI->isUnconditional()) {
1664 Succ = BI->getSuccessor(0);
1665 return true;
1666 }
1667 return false;
1668 }
1669};
1670
1671inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
1672
1673template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
1674struct brc_match {
1675 Cond_t Cond;
1676 TrueBlock_t T;
1677 FalseBlock_t F;
1678
1679 brc_match(const Cond_t &C, const TrueBlock_t &t, const FalseBlock_t &f)
1680 : Cond(C), T(t), F(f) {}
1681
1682 template <typename OpTy> bool match(OpTy *V) {
1683 if (auto *BI = dyn_cast<BranchInst>(V))
1684 if (BI->isConditional() && Cond.match(BI->getCondition()))
1685 return T.match(BI->getSuccessor(0)) && F.match(BI->getSuccessor(1));
1686 return false;
1687 }
1688};
1689
1690template <typename Cond_t>
1691inline brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>
1692m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
1693 return brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>(
1694 C, m_BasicBlock(T), m_BasicBlock(F));
1695}
1696
1697template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
1698inline brc_match<Cond_t, TrueBlock_t, FalseBlock_t>
1699m_Br(const Cond_t &C, const TrueBlock_t &T, const FalseBlock_t &F) {
1700 return brc_match<Cond_t, TrueBlock_t, FalseBlock_t>(C, T, F);
1701}
1702
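// Example (illustrative sketch; hypothetical helper): decomposing a
// conditional branch into its condition and successor blocks.
static bool matchCondBrExample(llvm::Instruction *I, llvm::Value *&Cond,
                               llvm::BasicBlock *&IfTrue,
                               llvm::BasicBlock *&IfFalse) {
  using namespace llvm::PatternMatch;
  return match(I, m_Br(m_Value(Cond), IfTrue, IfFalse));
}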
1703//===----------------------------------------------------------------------===//
1704// Matchers for max/min idioms, e.g.: "select (sgt x, y), x, y" -> smax(x,y).
1705//
1706
1707template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
1708 bool Commutable = false>
1709struct MaxMin_match {
1710 using PredType = Pred_t;
1711 LHS_t L;
1712 RHS_t R;
1713
1714 // The evaluation order is always stable, regardless of Commutability.
1715 // The LHS is always matched first.
1716 MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1717
1718 template <typename OpTy> bool match(OpTy *V) {
1719 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1720 Intrinsic::ID IID = II->getIntrinsicID();
1721 if ((IID == Intrinsic::smax && Pred_t::match(ICmpInst::ICMP_SGT)) ||
1722 (IID == Intrinsic::smin && Pred_t::match(ICmpInst::ICMP_SLT)) ||
1723 (IID == Intrinsic::umax && Pred_t::match(ICmpInst::ICMP_UGT)) ||
1724 (IID == Intrinsic::umin && Pred_t::match(ICmpInst::ICMP_ULT))) {
1725 Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
1726 return (L.match(LHS) && R.match(RHS)) ||
1727 (Commutable && L.match(RHS) && R.match(LHS));
1728 }
1729 }
1730 // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
1731 auto *SI = dyn_cast<SelectInst>(V);
1732 if (!SI)
1733 return false;
1734 auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
1735 if (!Cmp)
1736 return false;
1737 // At this point we have a select conditioned on a comparison. Check that
1738 // it is the values returned by the select that are being compared.
1739 auto *TrueVal = SI->getTrueValue();
1740 auto *FalseVal = SI->getFalseValue();
1741 auto *LHS = Cmp->getOperand(0);
1742 auto *RHS = Cmp->getOperand(1);
1743 if ((TrueVal != LHS || FalseVal != RHS) &&
1744 (TrueVal != RHS || FalseVal != LHS))
1745 return false;
1746 typename CmpInst_t::Predicate Pred =
1747 LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
1748 // Does "(x pred y) ? x : y" represent the desired max/min operation?
1749 if (!Pred_t::match(Pred))
1750 return false;
1751 // It does! Bind the operands.
1752 return (L.match(LHS) && R.match(RHS)) ||
1753 (Commutable && L.match(RHS) && R.match(LHS));
1754 }
1755};
1756
1757/// Helper class for identifying signed max predicates.
1758struct smax_pred_ty {
1759 static bool match(ICmpInst::Predicate Pred) {
1760 return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
1761 }
1762};
1763
1764/// Helper class for identifying signed min predicates.
1765struct smin_pred_ty {
1766 static bool match(ICmpInst::Predicate Pred) {
1767 return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
1768 }
1769};
1770
1771/// Helper class for identifying unsigned max predicates.
1772struct umax_pred_ty {
1773 static bool match(ICmpInst::Predicate Pred) {
1774 return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
1775 }
1776};
1777
1778/// Helper class for identifying unsigned min predicates.
1779struct umin_pred_ty {
1780 static bool match(ICmpInst::Predicate Pred) {
1781 return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
1782 }
1783};
1784
1785/// Helper class for identifying ordered max predicates.
1786struct ofmax_pred_ty {
1787 static bool match(FCmpInst::Predicate Pred) {
1788 return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
1789 }
1790};
1791
1792/// Helper class for identifying ordered min predicates.
1793struct ofmin_pred_ty {
1794 static bool match(FCmpInst::Predicate Pred) {
1795 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
1796 }
1797};
1798
1799/// Helper class for identifying unordered max predicates.
1800struct ufmax_pred_ty {
1801 static bool match(FCmpInst::Predicate Pred) {
1802 return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
1803 }
1804};
1805
1806/// Helper class for identifying unordered min predicates.
1807struct ufmin_pred_ty {
1808 static bool match(FCmpInst::Predicate Pred) {
1809 return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
1810 }
1811};
1812
1813template <typename LHS, typename RHS>
1814inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
1815 const RHS &R) {
1816 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
1817}
1818
1819template <typename LHS, typename RHS>
1820inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
1821 const RHS &R) {
1822 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
1823}
1824
1825template <typename LHS, typename RHS>
1826inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
1827 const RHS &R) {
1828 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
1829}
1830
1831template <typename LHS, typename RHS>
1832inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
1833 const RHS &R) {
1834 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
1835}
1836
1837template <typename LHS, typename RHS>
1838inline match_combine_or<
1839 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>,
1840 MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>>,
1841 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>,
1842 MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>>>
1843m_MaxOrMin(const LHS &L, const RHS &R) {
1844 return m_CombineOr(m_CombineOr(m_SMax(L, R), m_SMin(L, R)),
1845 m_CombineOr(m_UMax(L, R), m_UMin(L, R)));
1846}
1847
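// Example (illustrative sketch; hypothetical helper): m_SMax recognizes both
// the llvm.smax intrinsic and the "select (icmp sgt X, Y), X, Y" idiom.
static bool matchSMaxExample(llvm::Value *V, llvm::Value *&X, llvm::Value *&Y) {
  using namespace llvm::PatternMatch;
  return match(V, m_SMax(m_Value(X), m_Value(Y)));
}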
1848/// Match an 'ordered' floating point maximum function.
1849/// Floating point has one special value 'NaN'. Therefore, there is no total
1850/// order. However, if we can ignore the 'NaN' value (for example, because of a
1851/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
1852/// semantics. In the presence of 'NaN' we have to preserve the original
1853/// select(fcmp(ogt/oge, L, R), L, R) semantics matched by this predicate.
1854///
1855/// max(L, R) iff L and R are not NaN
1856/// m_OrdFMax(L, R) = R iff L or R are NaN
1857template <typename LHS, typename RHS>
1858inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
1859 const RHS &R) {
1860 return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
1861}
1862
1863/// Match an 'ordered' floating point minimum function.
1864/// Floating point has one special value 'NaN'. Therefore, there is no total
1865/// order. However, if we can ignore the 'NaN' value (for example, because of a
1866/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
1867/// semantics. In the presence of 'NaN' we have to preserve the original
1868/// select(fcmp(olt/ole, L, R), L, R) semantics matched by this predicate.
1869///
1870/// min(L, R) iff L and R are not NaN
1871/// m_OrdFMin(L, R) = R iff L or R are NaN
1872template <typename LHS, typename RHS>
1873inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
1874 const RHS &R) {
1875 return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
1876}
1877
1878/// Match an 'unordered' floating point maximum function.
1879/// Floating point has one special value 'NaN'. Therefore, there is no total
1880/// order. However, if we can ignore the 'NaN' value (for example, because of a
1881/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
1882/// semantics. In the presence of 'NaN' we have to preserve the original
1883/// select(fcmp(ugt/uge, L, R), L, R) semantics matched by this predicate.
1884///
1885/// max(L, R) iff L and R are not NaN
1886/// m_UnordFMax(L, R) = L iff L or R are NaN
1887template <typename LHS, typename RHS>
1888inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
1889m_UnordFMax(const LHS &L, const RHS &R) {
1890 return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
1891}
1892
1893/// Match an 'unordered' floating point minimum function.
1894/// Floating point has one special value 'NaN'. Therefore, there is no total
1895/// order. However, if we can ignore the 'NaN' value (for example, because of a
1896/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
1897/// semantics. In the presence of 'NaN' we have to preserve the original
1898/// select(fcmp(ult/ule, L, R), L, R) semantics matched by this predicate.
1899///
1900/// min(L, R) iff L and R are not NaN
1901/// m_UnordFMin(L, R) = L iff L or R are NaN
1902template <typename LHS, typename RHS>
1903inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
1904m_UnordFMin(const LHS &L, const RHS &R) {
1905 return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
1906}
1907
1908//===----------------------------------------------------------------------===//
1909// Matchers for overflow check patterns: e.g. (a + b) u< a, (a ^ -1) <u b
1910// Note that S might be matched to instructions other than AddInst.
1911//
1912
1913template <typename LHS_t, typename RHS_t, typename Sum_t>
1914struct UAddWithOverflow_match {
1915 LHS_t L;
1916 RHS_t R;
1917 Sum_t S;
1918
1919 UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
1920 : L(L), R(R), S(S) {}
1921
1922 template <typename OpTy> bool match(OpTy *V) {
1923 Value *ICmpLHS, *ICmpRHS;
1924 ICmpInst::Predicate Pred;
1925 if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
1926 return false;
1927
1928 Value *AddLHS, *AddRHS;
1929 auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));
1930
1931 // (a + b) u< a, (a + b) u< b
1932 if (Pred == ICmpInst::ICMP_ULT)
1933 if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
1934 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
1935
1936 // a >u (a + b), b >u (a + b)
1937 if (Pred == ICmpInst::ICMP_UGT)
1938 if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
1939 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
1940
1941 Value *Op1;
1942 auto XorExpr = m_OneUse(m_Xor(m_Value(Op1), m_AllOnes()));
1943 // (a ^ -1) <u b
1944 if (Pred == ICmpInst::ICMP_ULT) {
1945 if (XorExpr.match(ICmpLHS))
1946 return L.match(Op1) && R.match(ICmpRHS) && S.match(ICmpLHS);
1947 }
1948    // b >u (a ^ -1)
1949 if (Pred == ICmpInst::ICMP_UGT) {
1950 if (XorExpr.match(ICmpRHS))
1951 return L.match(Op1) && R.match(ICmpLHS) && S.match(ICmpRHS);
1952 }
1953
1954 // Match special-case for increment-by-1.
1955 if (Pred == ICmpInst::ICMP_EQ) {
1956 // (a + 1) == 0
1957 // (1 + a) == 0
1958 if (AddExpr.match(ICmpLHS) && m_ZeroInt().match(ICmpRHS) &&
1959 (m_One().match(AddLHS) || m_One().match(AddRHS)))
1960 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
1961 // 0 == (a + 1)
1962 // 0 == (1 + a)
1963 if (m_ZeroInt().match(ICmpLHS) && AddExpr.match(ICmpRHS) &&
1964 (m_One().match(AddLHS) || m_One().match(AddRHS)))
1965 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
1966 }
1967
1968 return false;
1969 }
1970};
1971
1972/// Match an icmp instruction checking for unsigned overflow on addition.
1973///
1974/// S is matched to the addition whose result is being checked for overflow, and
1975/// L and R are matched to the LHS and RHS of S.
1976template <typename LHS_t, typename RHS_t, typename Sum_t>
1977UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
1978m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
1979 return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
1980}
1981
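// Example (illustrative sketch; hypothetical helper): recognizing
// "icmp ult (add A, B), A" as an unsigned-add overflow check; Sum binds to
// the add instruction itself.
static bool matchUAddOvExample(llvm::Value *V, llvm::Value *&A,
                               llvm::Value *&B, llvm::Value *&Sum) {
  using namespace llvm::PatternMatch;
  return match(V, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)));
}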
1982template <typename Opnd_t> struct Argument_match {
1983 unsigned OpI;
1984 Opnd_t Val;
1985
1986 Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
1987
1988 template <typename OpTy> bool match(OpTy *V) {
1989 // FIXME: Should likely be switched to use `CallBase`.
1990 if (const auto *CI = dyn_cast<CallInst>(V))
1991 return Val.match(CI->getArgOperand(OpI));
1992 return false;
1993 }
1994};
1995
1996/// Match an argument.
1997template <unsigned OpI, typename Opnd_t>
1998inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
1999 return Argument_match<Opnd_t>(OpI, Op);
2000}
2001
2002/// Intrinsic matchers.
2003struct IntrinsicID_match {
2004 unsigned ID;
2005
2006 IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
2007
2008 template <typename OpTy> bool match(OpTy *V) {
2009 if (const auto *CI = dyn_cast<CallInst>(V))
2010 if (const auto *F = CI->getCalledFunction())
2011 return F->getIntrinsicID() == ID;
2012 return false;
2013 }
2014};
2015
2016/// Intrinsic matchers are combinations of ID matchers and argument
2017/// matchers. Higher-arity matchers are defined recursively, by and-ing them
2018/// with lower-arity matchers. Here are some convenient typedefs for up to
2019/// several arguments; more can be added as needed.
2020template <typename T0 = void, typename T1 = void, typename T2 = void,
2021 typename T3 = void, typename T4 = void, typename T5 = void,
2022 typename T6 = void, typename T7 = void, typename T8 = void,
2023 typename T9 = void, typename T10 = void>
2024struct m_Intrinsic_Ty;
2025template <typename T0> struct m_Intrinsic_Ty<T0> {
2026 using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
2027};
2028template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
2029 using Ty =
2030 match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
2031};
2032template <typename T0, typename T1, typename T2>
2033struct m_Intrinsic_Ty<T0, T1, T2> {
2034 using Ty =
2035 match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
2036 Argument_match<T2>>;
2037};
2038template <typename T0, typename T1, typename T2, typename T3>
2039struct m_Intrinsic_Ty<T0, T1, T2, T3> {
2040 using Ty =
2041 match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
2042 Argument_match<T3>>;
2043};
2044
2045template <typename T0, typename T1, typename T2, typename T3, typename T4>
2046struct m_Intrinsic_Ty<T0, T1, T2, T3, T4> {
2047 using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty,
2048 Argument_match<T4>>;
2049};
2050
2051template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
2052struct m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5> {
2053 using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty,
2054 Argument_match<T5>>;
2055};
2056
2057/// Match intrinsic calls like this:
2058/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
2059template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
2060 return IntrinsicID_match(IntrID);
2061}
2062
2063template <Intrinsic::ID IntrID, typename T0>
2064inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
2065 return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
2066}
2067
2068template <Intrinsic::ID IntrID, typename T0, typename T1>
2069inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
2070 const T1 &Op1) {
2071 return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
2072}
2073
2074template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
2075inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
2076m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
2077 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
2078}
2079
2080template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
2081 typename T3>
2082inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
2083m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
2084 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
2085}
2086
2087template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
2088 typename T3, typename T4>
2089inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty
2090m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
2091 const T4 &Op4) {
2092 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3),
2093 m_Argument<4>(Op4));
2094}
2095
2096template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
2097 typename T3, typename T4, typename T5>
2098inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5>::Ty
2099m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
2100 const T4 &Op4, const T5 &Op5) {
2101 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3, Op4),
2102 m_Argument<5>(Op5));
2103}
2104
2105// Helper intrinsic matching specializations.
2106template <typename Opnd0>
2107inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
2108 return m_Intrinsic<Intrinsic::bitreverse>(Op0);
2109}
2110
2111template <typename Opnd0>
2112inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
2113 return m_Intrinsic<Intrinsic::bswap>(Op0);
2114}
2115
2116template <typename Opnd0>
2117inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FAbs(const Opnd0 &Op0) {
2118 return m_Intrinsic<Intrinsic::fabs>(Op0);
2119}
2120
2121template <typename Opnd0>
2122inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FCanonicalize(const Opnd0 &Op0) {
2123 return m_Intrinsic<Intrinsic::canonicalize>(Op0);
2124}
2125
2126template <typename Opnd0, typename Opnd1>
2127inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
2128 const Opnd1 &Op1) {
2129 return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
2130}
2131
2132template <typename Opnd0, typename Opnd1>
2133inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
2134 const Opnd1 &Op1) {
2135 return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
2136}
2137
2138template <typename Opnd0, typename Opnd1, typename Opnd2>
2139inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2>::Ty
2140m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2) {
2141 return m_Intrinsic<Intrinsic::fshl>(Op0, Op1, Op2);
2142}
2143
2144template <typename Opnd0, typename Opnd1, typename Opnd2>
2145inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2>::Ty
2146m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2) {
2147 return m_Intrinsic<Intrinsic::fshr>(Op0, Op1, Op2);
2148}
2149
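// Example (illustrative sketch; hypothetical helper): a rotate-left is an
// fshl whose first two operands are the same value; m_Deferred re-checks the
// value bound by the earlier m_Value.
static bool matchRotateLeftExample(llvm::Value *V, llvm::Value *&X,
                                   const llvm::APInt *&Amt) {
  using namespace llvm::PatternMatch;
  return match(V, m_FShl(m_Value(X), m_Deferred(X), m_APInt(Amt)));
}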
2150//===----------------------------------------------------------------------===//
2151// Matchers for two-operands operators with the operators in either order
2152//
2153
2154/// Matches a BinaryOperator with LHS and RHS in either order.
2155template <typename LHS, typename RHS>
2156inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
2157 return AnyBinaryOp_match<LHS, RHS, true>(L, R);
2158}
2159
2160/// Matches an ICmp with a predicate over LHS and RHS in either order.
2161/// Swaps the predicate if operands are commuted.
2162template <typename LHS, typename RHS>
2163inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
2164m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
2165 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
2166 R);
2167}
2168
2169/// Matches a Add with LHS and RHS in either order.
2170template <typename LHS, typename RHS>
2171inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
2172 const RHS &R) {
2173 return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
2174}
2175
2176/// Matches a Mul with LHS and RHS in either order.
2177template <typename LHS, typename RHS>
2178inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
2179 const RHS &R) {
2180 return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
2181}
2182
2183/// Matches an And with LHS and RHS in either order.
2184template <typename LHS, typename RHS>
2185inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
2186 const RHS &R) {
2187 return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
2188}
2189
2190/// Matches an Or with LHS and RHS in either order.
2191template <typename LHS, typename RHS>
2192inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
2193 const RHS &R) {
2194 return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
2195}
2196
2197/// Matches an Xor with LHS and RHS in either order.
2198template <typename LHS, typename RHS>
2199inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
2200 const RHS &R) {
2201 return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
2202}
2203
2204/// Matches a 'Neg' as 'sub 0, V'.
2205template <typename ValTy>
2206inline BinaryOp_match<cst_pred_ty<is_zero_int>, ValTy, Instruction::Sub>
2207m_Neg(const ValTy &V) {
2208 return m_Sub(m_ZeroInt(), V);
2209}
2210
2211/// Matches a 'Neg' as 'sub nsw 0, V'.
2212template <typename ValTy>
2213inline OverflowingBinaryOp_match<cst_pred_ty<is_zero_int>, ValTy,
2214 Instruction::Sub,
2215 OverflowingBinaryOperator::NoSignedWrap>
2216m_NSWNeg(const ValTy &V) {
2217 return m_NSWSub(m_ZeroInt(), V);
2218}
2219
2220/// Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
2221template <typename ValTy>
2222inline BinaryOp_match<ValTy, cst_pred_ty<is_all_ones>, Instruction::Xor, true>
2223m_Not(const ValTy &V) {
2224 return m_c_Xor(V, m_AllOnes());
2225}
2226
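// Example (illustrative sketch; hypothetical helper): "A & ~B" with the
// operands in either order, combining a commutative matcher with m_Not.
static bool matchAndNotExample(llvm::Value *V, llvm::Value *&A,
                               llvm::Value *&B) {
  using namespace llvm::PatternMatch;
  return match(V, m_c_And(m_Value(A), m_Not(m_Value(B))));
}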
2227/// Matches an SMin with LHS and RHS in either order.
2228template <typename LHS, typename RHS>
2229inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
2230m_c_SMin(const LHS &L, const RHS &R) {
2231 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
2232}
2233/// Matches an SMax with LHS and RHS in either order.
2234template <typename LHS, typename RHS>
2235inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
2236m_c_SMax(const LHS &L, const RHS &R) {
2237 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
2238}
2239/// Matches a UMin with LHS and RHS in either order.
2240template <typename LHS, typename RHS>
2241inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
2242m_c_UMin(const LHS &L, const RHS &R) {
2243 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
2244}
2245/// Matches a UMax with LHS and RHS in either order.
2246template <typename LHS, typename RHS>
2247inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
2248m_c_UMax(const LHS &L, const RHS &R) {
2249 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
2250}
2251
2252template <typename LHS, typename RHS>
2253inline match_combine_or<
2254 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>,
2255 MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>>,
2256 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>,
2257 MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>>>
2258m_c_MaxOrMin(const LHS &L, const RHS &R) {
2259 return m_CombineOr(m_CombineOr(m_c_SMax(L, R), m_c_SMin(L, R)),
2260 m_CombineOr(m_c_UMax(L, R), m_c_UMin(L, R)));
2261}
2262
2263/// Matches FAdd with LHS and RHS in either order.
2264template <typename LHS, typename RHS>
2265inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true>
2266m_c_FAdd(const LHS &L, const RHS &R) {
2267 return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R);
2268}
2269
2270/// Matches FMul with LHS and RHS in either order.
2271template <typename LHS, typename RHS>
2272inline BinaryOp_match<LHS, RHS, Instruction::FMul, true>
2273m_c_FMul(const LHS &L, const RHS &R) {
2274 return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R);
2275}
2276
2277template <typename Opnd_t> struct Signum_match {
2278 Opnd_t Val;
2279 Signum_match(const Opnd_t &V) : Val(V) {}
2280
2281 template <typename OpTy> bool match(OpTy *V) {
2282 unsigned TypeSize = V->getType()->getScalarSizeInBits();
2283 if (TypeSize == 0)
2284 return false;
2285
2286 unsigned ShiftWidth = TypeSize - 1;
2287 Value *OpL = nullptr, *OpR = nullptr;
2288
2289 // This is the representation of signum we match:
2290 //
2291 // signum(x) == (x >> 63) | (-x >>u 63)
2292 //
2293 // An i1 value is its own signum, so it's correct to match
2294 //
2295 // signum(x) == (x >> 0) | (-x >>u 0)
2296 //
2297 // for i1 values.
2298
2299 auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
2300 auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
2301 auto Signum = m_Or(LHS, RHS);
2302
2303 return Signum.match(V) && OpL == OpR && Val.match(OpL);
2304 }
2305};
2306
2307/// Matches a signum pattern.
2308///
2309/// signum(x) =
2310/// x > 0 -> 1
2311/// x == 0 -> 0
2312/// x < 0 -> -1
2313template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
2314 return Signum_match<Val_t>(V);
2315}
2316
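// Example (illustrative sketch; hypothetical helper): recognizing the
// or-of-shifts signum idiom "(X >> bw-1) | (-X >>u bw-1)".
static bool matchSignumExample(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_Signum(m_Value(X)));
}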
2317template <int Ind, typename Opnd_t> struct ExtractValue_match {
2318 Opnd_t Val;
2319 ExtractValue_match(const Opnd_t &V) : Val(V) {}
2320
2321 template <typename OpTy> bool match(OpTy *V) {
2322 if (auto *I = dyn_cast<ExtractValueInst>(V)) {
2323 // If Ind is -1, don't inspect indices
2324 if (Ind != -1 &&
2325 !(I->getNumIndices() == 1 && I->getIndices()[0] == (unsigned)Ind))
2326 return false;
2327 return Val.match(I->getAggregateOperand());
2328 }
2329 return false;
2330 }
2331};
2332
2333/// Match a single index ExtractValue instruction.
2334/// For example m_ExtractValue<1>(...)
2335template <int Ind, typename Val_t>
2336inline ExtractValue_match<Ind, Val_t> m_ExtractValue(const Val_t &V) {
2337 return ExtractValue_match<Ind, Val_t>(V);
2338}
2339
2340/// Match an ExtractValue instruction with any index.
2341/// For example m_ExtractValue(...)
2342template <typename Val_t>
2343inline ExtractValue_match<-1, Val_t> m_ExtractValue(const Val_t &V) {
2344 return ExtractValue_match<-1, Val_t>(V);
2345}
2346
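// Example (illustrative sketch; hypothetical helper): the overflow bit of a
// *.with.overflow intrinsic is extractvalue index 1 of its aggregate result.
static bool matchOverflowBitExample(llvm::Value *V, llvm::Value *&Agg) {
  using namespace llvm::PatternMatch;
  return match(V, m_ExtractValue<1>(m_Value(Agg)));
}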
2347/// Matcher for a single index InsertValue instruction.
2348template <int Ind, typename T0, typename T1> struct InsertValue_match {
2349 T0 Op0;
2350 T1 Op1;
2351
2352 InsertValue_match(const T0 &Op0, const T1 &Op1) : Op0(Op0), Op1(Op1) {}
2353
2354 template <typename OpTy> bool match(OpTy *V) {
2355 if (auto *I = dyn_cast<InsertValueInst>(V)) {
2356 return Op0.match(I->getOperand(0)) && Op1.match(I->getOperand(1)) &&
2357 I->getNumIndices() == 1 && Ind == I->getIndices()[0];
2358 }
2359 return false;
2360 }
2361};
2362
2363/// Matches a single index InsertValue instruction.
2364template <int Ind, typename Val_t, typename Elt_t>
2365inline InsertValue_match<Ind, Val_t, Elt_t> m_InsertValue(const Val_t &Val,
2366 const Elt_t &Elt) {
2367 return InsertValue_match<Ind, Val_t, Elt_t>(Val, Elt);
2368}
2369
2370/// Matches patterns for `vscale`. This can either be a call to `llvm.vscale` or
2371/// the constant expression
2372/// `ptrtoint(gep <vscale x 1 x i8>, <vscale x 1 x i8>* null, i32 1)`
2373/// under the right conditions determined by DataLayout.
2374struct VScaleVal_match {
2375private:
2376 template <typename Base, typename Offset>
2377 inline BinaryOp_match<Base, Offset, Instruction::GetElementPtr>
2378 m_OffsetGep(const Base &B, const Offset &O) {
2379 return BinaryOp_match<Base, Offset, Instruction::GetElementPtr>(B, O);
2380 }
2381
2382public:
2383 const DataLayout &DL;
2384 VScaleVal_match(const DataLayout &DL) : DL(DL) {}
2385
2386 template <typename ITy> bool match(ITy *V) {
2387 if (m_Intrinsic<Intrinsic::vscale>().match(V))
2388 return true;
2389
2390 if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
2391 Type *PtrTy = cast<Operator>(V)->getOperand(0)->getType();
2392 auto *DerefTy = PtrTy->getPointerElementType();
2393 if (isa<ScalableVectorType>(DerefTy) &&
2394 DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
2395 return true;
2396 }
2397
2398 return false;
2399 }
2400};
2401
2402inline VScaleVal_match m_VScale(const DataLayout &DL) {
2403 return VScaleVal_match(DL);
2404}
2405
2406template <typename LHS, typename RHS, unsigned Opcode>
2407struct LogicalOp_match {
2408 LHS L;
2409 RHS R;
2410
2411 LogicalOp_match(const LHS &L, const RHS &R) : L(L), R(R) {}
2412
2413 template <typename T> bool match(T *V) {
2414 if (auto *I = dyn_cast<Instruction>(V)) {
2415 if (!I->getType()->isIntOrIntVectorTy(1))
2416 return false;
2417
2418 if (I->getOpcode() == Opcode && L.match(I->getOperand(0)) &&
2419 R.match(I->getOperand(1)))
2420 return true;
2421
2422 if (auto *SI = dyn_cast<SelectInst>(I)) {
2423 if (Opcode == Instruction::And) {
2424 if (const auto *C = dyn_cast<Constant>(SI->getFalseValue()))
2425 if (C->isNullValue() && L.match(SI->getCondition()) &&
2426 R.match(SI->getTrueValue()))
2427 return true;
2428 } else {
2429          assert(Opcode == Instruction::Or);
2430 if (const auto *C = dyn_cast<Constant>(SI->getTrueValue()))
2431 if (C->isOneValue() && L.match(SI->getCondition()) &&
2432 R.match(SI->getFalseValue()))
2433 return true;
2434 }
2435 }
2436 }
2437
2438 return false;
2439 }
2440};
2441
2442/// Matches L && R either in the form of L & R or L ? R : false.
2443/// Note that the latter form is poison-blocking.
2444template <typename LHS, typename RHS>
2445inline LogicalOp_match<LHS, RHS, Instruction::And>
2446m_LogicalAnd(const LHS &L, const RHS &R) {
2447 return LogicalOp_match<LHS, RHS, Instruction::And>(L, R);
2448}
2449
2450/// Matches L && R where L and R are arbitrary values.
2451inline auto m_LogicalAnd() { return m_LogicalAnd(m_Value(), m_Value()); }
2452
2453/// Matches L || R either in the form of L | R or L ? true : R.
2454/// Note that the latter form is poison-blocking.
2455template <typename LHS, typename RHS>
2456inline LogicalOp_match<LHS, RHS, Instruction::Or>
2457m_LogicalOr(const LHS &L, const RHS &R) {
2458 return LogicalOp_match<LHS, RHS, Instruction::Or>(L, R);
2459}
2460
2461/// Matches L || R where L and R are arbitrary values.
2462inline auto m_LogicalOr() {
2463 return m_LogicalOr(m_Value(), m_Value());
2464}
2465
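// Example (illustrative sketch; hypothetical helper): m_LogicalAnd accepts
// both "and i1 A, B" and the poison-blocking "select i1 A, i1 B, i1 false".
static bool matchLogicalAndExample(llvm::Value *V, llvm::Value *&A,
                                   llvm::Value *&B) {
  using namespace llvm::PatternMatch;
  return match(V, m_LogicalAnd(m_Value(A), m_Value(B)));
}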
2466} // end namespace PatternMatch
2467} // end namespace llvm
2468
2469#endif // LLVM_IR_PATTERNMATCH_H