Bug Summary

File: lib/Transforms/InstCombine/InstCombineShifts.cpp
Warning: line 205, column 25
Called C++ object pointer is uninitialized

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InstCombineShifts.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn326246/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/lib/Transforms/InstCombine -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-28-041547-14988-1 -x c++ /build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp

/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp

1//===- InstCombineShifts.cpp ----------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the visitShl, visitLShr, and visitAShr functions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombineInternal.h"
15#include "llvm/Analysis/ConstantFolding.h"
16#include "llvm/Analysis/InstructionSimplify.h"
17#include "llvm/IR/IntrinsicInst.h"
18#include "llvm/IR/PatternMatch.h"
19using namespace llvm;
20using namespace PatternMatch;
21
22#define DEBUG_TYPE"instcombine" "instcombine"
23
24Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
25 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
26 assert(Op0->getType() == Op1->getType())(static_cast <bool> (Op0->getType() == Op1->getType
()) ? void (0) : __assert_fail ("Op0->getType() == Op1->getType()"
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 26, __extension__ __PRETTY_FUNCTION__))
;
27
28 // See if we can fold away this shift.
29 if (SimplifyDemandedInstructionBits(I))
30 return &I;
31
32 // Try to fold constant and into select arguments.
33 if (isa<Constant>(Op0))
34 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
35 if (Instruction *R = FoldOpIntoSelect(I, SI))
36 return R;
37
38 if (Constant *CUI = dyn_cast<Constant>(Op1))
39 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
40 return Res;
41
42 // (C1 shift (A add C2)) -> (C1 shift C2) shift A)
43 // iff A and C2 are both positive.
44 Value *A;
45 Constant *C;
46 if (match(Op0, m_Constant()) && match(Op1, m_Add(m_Value(A), m_Constant(C))))
47 if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) &&
48 isKnownNonNegative(C, DL, 0, &AC, &I, &DT))
49 return BinaryOperator::Create(
50 I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A);
51
52 // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
53 // Because shifts by negative values (which could occur if A were negative)
54 // are undefined.
55 const APInt *B;
56 if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
57 // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
58 // demand the sign bit (and many others) here??
59 Value *Rem = Builder.CreateAnd(A, ConstantInt::get(I.getType(), *B - 1),
60 Op1->getName());
61 I.setOperand(1, Rem);
62 return &I;
63 }
64
65 return nullptr;
66}
67
68/// Return true if we can simplify two logical (either left or right) shifts
69/// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
70static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
71 Instruction *InnerShift, InstCombiner &IC,
72 Instruction *CxtI) {
73 assert(InnerShift->isLogicalShift() && "Unexpected instruction type")(static_cast <bool> (InnerShift->isLogicalShift() &&
"Unexpected instruction type") ? void (0) : __assert_fail ("InnerShift->isLogicalShift() && \"Unexpected instruction type\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 73, __extension__ __PRETTY_FUNCTION__))
;
74
75 // We need constant scalar or constant splat shifts.
76 const APInt *InnerShiftConst;
77 if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
78 return false;
79
80 // Two logical shifts in the same direction:
81 // shl (shl X, C1), C2 --> shl X, C1 + C2
82 // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
83 bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
84 if (IsInnerShl == IsOuterShl)
85 return true;
86
87 // Equal shift amounts in opposite directions become bitwise 'and':
88 // lshr (shl X, C), C --> and X, C'
89 // shl (lshr X, C), C --> and X, C'
90 if (*InnerShiftConst == OuterShAmt)
91 return true;
92
93 // If the 2nd shift is bigger than the 1st, we can fold:
94 // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
95 // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
96 // but it isn't profitable unless we know the and'd out bits are already zero.
97 // Also, check that the inner shift is valid (less than the type width) or
98 // we'll crash trying to produce the bit mask for the 'and'.
99 unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
100 if (InnerShiftConst->ugt(OuterShAmt) && InnerShiftConst->ult(TypeWidth)) {
101 unsigned InnerShAmt = InnerShiftConst->getZExtValue();
102 unsigned MaskShift =
103 IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
104 APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
105 if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
106 return true;
107 }
108
109 return false;
110}
111
112/// See if we can compute the specified value, but shifted logically to the left
113/// or right by some number of bits. This should return true if the expression
114/// can be computed for the same cost as the current expression tree. This is
115/// used to eliminate extraneous shifting from things like:
116/// %C = shl i128 %A, 64
117/// %D = shl i128 %B, 96
118/// %E = or i128 %C, %D
119/// %F = lshr i128 %E, 64
120/// where the client will ask if E can be computed shifted right by 64-bits. If
121/// this succeeds, getShiftedValue() will be called to produce the value.
122static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
123 InstCombiner &IC, Instruction *CxtI) {
124 // We can always evaluate constants shifted.
125 if (isa<Constant>(V))
126 return true;
127
128 Instruction *I = dyn_cast<Instruction>(V);
129 if (!I) return false;
130
131 // If this is the opposite shift, we can directly reuse the input of the shift
132 // if the needed bits are already zero in the input. This allows us to reuse
133 // the value which means that we don't care if the shift has multiple uses.
134 // TODO: Handle opposite shift by exact value.
135 ConstantInt *CI = nullptr;
136 if ((IsLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
137 (!IsLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
138 if (CI->getValue() == NumBits) {
139 // TODO: Check that the input bits are already zero with MaskedValueIsZero
140#if 0
141 // If this is a truncate of a logical shr, we can truncate it to a smaller
142 // lshr iff we know that the bits we would otherwise be shifting in are
143 // already zeros.
144 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
145 uint32_t BitWidth = Ty->getScalarSizeInBits();
146 if (MaskedValueIsZero(I->getOperand(0),
147 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
148 CI->getLimitedValue(BitWidth) < BitWidth) {
149 return CanEvaluateTruncated(I->getOperand(0), Ty);
150 }
151#endif
152
153 }
154 }
155
156 // We can't mutate something that has multiple uses: doing so would
157 // require duplicating the instruction in general, which isn't profitable.
158 if (!I->hasOneUse()) return false;
159
160 switch (I->getOpcode()) {
161 default: return false;
162 case Instruction::And:
163 case Instruction::Or:
164 case Instruction::Xor:
165 // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
166 return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
167 canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
168
169 case Instruction::Shl:
170 case Instruction::LShr:
171 return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
172
173 case Instruction::Select: {
174 SelectInst *SI = cast<SelectInst>(I);
175 Value *TrueVal = SI->getTrueValue();
176 Value *FalseVal = SI->getFalseValue();
177 return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
178 canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
179 }
180 case Instruction::PHI: {
181 // We can change a phi if we can change all operands. Note that we never
182 // get into trouble with cyclic PHIs here because we only consider
183 // instructions with a single use.
184 PHINode *PN = cast<PHINode>(I);
185 for (Value *IncValue : PN->incoming_values())
186 if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
187 return false;
188 return true;
189 }
190 }
191}
192
193/// Fold OuterShift (InnerShift X, C1), C2.
194/// See canEvaluateShiftedShift() for the constraints on these instructions.
195static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
196 bool IsOuterShl,
197 InstCombiner::BuilderTy &Builder) {
198 bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
15
Assuming the condition is false
199 Type *ShType = InnerShift->getType();
200 unsigned TypeWidth = ShType->getScalarSizeInBits();
201
202 // We only accept shifts-by-a-constant in canEvaluateShifted().
203 const APInt *C1;
16
'C1' declared without an initial value
204 match(InnerShift->getOperand(1), m_APInt(C1));
17
Calling 'm_APInt'
22
Returning from 'm_APInt'
205 unsigned InnerShAmt = C1->getZExtValue();
23
Called C++ object pointer is uninitialized
206
207 // Change the shift amount and clear the appropriate IR flags.
208 auto NewInnerShift = [&](unsigned ShAmt) {
209 InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
210 if (IsInnerShl) {
211 InnerShift->setHasNoUnsignedWrap(false);
212 InnerShift->setHasNoSignedWrap(false);
213 } else {
214 InnerShift->setIsExact(false);
215 }
216 return InnerShift;
217 };
218
219 // Two logical shifts in the same direction:
220 // shl (shl X, C1), C2 --> shl X, C1 + C2
221 // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
222 if (IsInnerShl == IsOuterShl) {
223 // If this is an oversized composite shift, then unsigned shifts get 0.
224 if (InnerShAmt + OuterShAmt >= TypeWidth)
225 return Constant::getNullValue(ShType);
226
227 return NewInnerShift(InnerShAmt + OuterShAmt);
228 }
229
230 // Equal shift amounts in opposite directions become bitwise 'and':
231 // lshr (shl X, C), C --> and X, C'
232 // shl (lshr X, C), C --> and X, C'
233 if (InnerShAmt == OuterShAmt) {
234 APInt Mask = IsInnerShl
235 ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
236 : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
237 Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
238 ConstantInt::get(ShType, Mask));
239 if (auto *AndI = dyn_cast<Instruction>(And)) {
240 AndI->moveBefore(InnerShift);
241 AndI->takeName(InnerShift);
242 }
243 return And;
244 }
245
246 assert(InnerShAmt > OuterShAmt &&(static_cast <bool> (InnerShAmt > OuterShAmt &&
"Unexpected opposite direction logical shift pair") ? void (
0) : __assert_fail ("InnerShAmt > OuterShAmt && \"Unexpected opposite direction logical shift pair\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 247, __extension__ __PRETTY_FUNCTION__))
247 "Unexpected opposite direction logical shift pair")(static_cast <bool> (InnerShAmt > OuterShAmt &&
"Unexpected opposite direction logical shift pair") ? void (
0) : __assert_fail ("InnerShAmt > OuterShAmt && \"Unexpected opposite direction logical shift pair\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 247, __extension__ __PRETTY_FUNCTION__))
;
248
249 // In general, we would need an 'and' for this transform, but
250 // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
251 // lshr (shl X, C1), C2 --> shl X, C1 - C2
252 // shl (lshr X, C1), C2 --> lshr X, C1 - C2
253 return NewInnerShift(InnerShAmt - OuterShAmt);
254}
255
256/// When canEvaluateShifted() returns true for an expression, this function
257/// inserts the new computation that produces the shifted value.
258static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
259 InstCombiner &IC, const DataLayout &DL) {
260 // We can always evaluate constants shifted.
261 if (Constant *C = dyn_cast<Constant>(V)) {
1
Taking false branch
12
Taking false branch
262 if (isLeftShift)
263 V = IC.Builder.CreateShl(C, NumBits);
264 else
265 V = IC.Builder.CreateLShr(C, NumBits);
266 // If we got a constantexpr back, try to simplify it with TD info.
267 if (auto *C = dyn_cast<Constant>(V))
268 if (auto *FoldedC =
269 ConstantFoldConstant(C, DL, &IC.getTargetLibraryInfo()))
270 V = FoldedC;
271 return V;
272 }
273
274 Instruction *I = cast<Instruction>(V);
275 IC.Worklist.Add(I);
276
277 switch (I->getOpcode()) {
2
Control jumps to 'case PHI:' at line 300
13
Control jumps to 'case Shl:' at line 289
278 default: llvm_unreachable("Inconsistency with CanEvaluateShifted")::llvm::llvm_unreachable_internal("Inconsistency with CanEvaluateShifted"
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 278)
;
279 case Instruction::And:
280 case Instruction::Or:
281 case Instruction::Xor:
282 // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
283 I->setOperand(
284 0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
285 I->setOperand(
286 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
287 return I;
288
289 case Instruction::Shl:
290 case Instruction::LShr:
291 return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
14
Calling 'foldShiftedShift'
292 IC.Builder);
293
294 case Instruction::Select:
295 I->setOperand(
296 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
297 I->setOperand(
298 2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
299 return I;
300 case Instruction::PHI: {
301 // We can change a phi if we can change all operands. Note that we never
302 // get into trouble with cyclic PHIs here because we only consider
303 // instructions with a single use.
304 PHINode *PN = cast<PHINode>(I);
305 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
3
Assuming 'i' is not equal to 'e'
4
Loop condition is true. Entering loop body
5
Assuming 'i' is not equal to 'e'
6
Loop condition is true. Entering loop body
7
Assuming 'i' is not equal to 'e'
8
Loop condition is true. Entering loop body
9
Assuming 'i' is not equal to 'e'
10
Loop condition is true. Entering loop body
306 PN->setIncomingValue(i, getShiftedValue(PN->getIncomingValue(i), NumBits,
11
Calling 'getShiftedValue'
307 isLeftShift, IC, DL));
308 return PN;
309 }
310 }
311}
312
313// If this is a bitwise operator or add with a constant RHS we might be able
314// to pull it through a shift.
315static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift,
316 BinaryOperator *BO,
317 const APInt &C) {
318 bool IsValid = true; // Valid only for And, Or Xor,
319 bool HighBitSet = false; // Transform ifhigh bit of constant set?
320
321 switch (BO->getOpcode()) {
322 default: IsValid = false; break; // Do not perform transform!
323 case Instruction::Add:
324 IsValid = Shift.getOpcode() == Instruction::Shl;
325 break;
326 case Instruction::Or:
327 case Instruction::Xor:
328 HighBitSet = false;
329 break;
330 case Instruction::And:
331 HighBitSet = true;
332 break;
333 }
334
335 // If this is a signed shift right, and the high bit is modified
336 // by the logical operation, do not perform the transformation.
337 // The HighBitSet boolean indicates the value of the high bit of
338 // the constant which would cause it to be modified for this
339 // operation.
340 //
341 if (IsValid && Shift.getOpcode() == Instruction::AShr)
342 IsValid = C.isNegative() == HighBitSet;
343
344 return IsValid;
345}
346
347Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
348 BinaryOperator &I) {
349 bool isLeftShift = I.getOpcode() == Instruction::Shl;
350
351 const APInt *Op1C;
352 if (!match(Op1, m_APInt(Op1C)))
353 return nullptr;
354
355 // See if we can propagate this shift into the input, this covers the trivial
356 // cast of lshr(shl(x,c1),c2) as well as other more complex cases.
357 if (I.getOpcode() != Instruction::AShr &&
358 canEvaluateShifted(Op0, Op1C->getZExtValue(), isLeftShift, *this, &I)) {
359 DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("instcombine")) { dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: "
<< I <<"\n"; } } while (false)
360 " to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("instcombine")) { dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: "
<< I <<"\n"; } } while (false)
;
361
362 return replaceInstUsesWith(
363 I, getShiftedValue(Op0, Op1C->getZExtValue(), isLeftShift, *this, DL));
364 }
365
366 // See if we can simplify any instructions used by the instruction whose sole
367 // purpose is to compute bits we don't care about.
368 unsigned TypeBits = Op0->getType()->getScalarSizeInBits();
369
370 assert(!Op1C->uge(TypeBits) &&(static_cast <bool> (!Op1C->uge(TypeBits) &&
"Shift over the type width should have been removed already"
) ? void (0) : __assert_fail ("!Op1C->uge(TypeBits) && \"Shift over the type width should have been removed already\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 371, __extension__ __PRETTY_FUNCTION__))
371 "Shift over the type width should have been removed already")(static_cast <bool> (!Op1C->uge(TypeBits) &&
"Shift over the type width should have been removed already"
) ? void (0) : __assert_fail ("!Op1C->uge(TypeBits) && \"Shift over the type width should have been removed already\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 371, __extension__ __PRETTY_FUNCTION__))
;
372
373 if (Instruction *FoldedShift = foldOpWithConstantIntoOperand(I))
374 return FoldedShift;
375
376 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
377 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
378 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
379 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
380 // place. Don't try to do this transformation in this case. Also, we
381 // require that the input operand is a shift-by-constant so that we have
382 // confidence that the shifts will get folded together. We could do this
383 // xform in more cases, but it is unlikely to be profitable.
384 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
385 isa<ConstantInt>(TrOp->getOperand(1))) {
386 // Okay, we'll do this xform. Make the shift of shift.
387 Constant *ShAmt =
388 ConstantExpr::getZExt(cast<Constant>(Op1), TrOp->getType());
389 // (shift2 (shift1 & 0x00FF), c2)
390 Value *NSh = Builder.CreateBinOp(I.getOpcode(), TrOp, ShAmt, I.getName());
391
392 // For logical shifts, the truncation has the effect of making the high
393 // part of the register be zeros. Emulate this by inserting an AND to
394 // clear the top bits as needed. This 'and' will usually be zapped by
395 // other xforms later if dead.
396 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
397 unsigned DstSize = TI->getType()->getScalarSizeInBits();
398 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
399
400 // The mask we constructed says what the trunc would do if occurring
401 // between the shifts. We want to know the effect *after* the second
402 // shift. We know that it is a logical shift by a constant, so adjust the
403 // mask as appropriate.
404 if (I.getOpcode() == Instruction::Shl)
405 MaskV <<= Op1C->getZExtValue();
406 else {
407 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift")(static_cast <bool> (I.getOpcode() == Instruction::LShr
&& "Unknown logical shift") ? void (0) : __assert_fail
("I.getOpcode() == Instruction::LShr && \"Unknown logical shift\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 407, __extension__ __PRETTY_FUNCTION__))
;
408 MaskV.lshrInPlace(Op1C->getZExtValue());
409 }
410
411 // shift1 & 0x00FF
412 Value *And = Builder.CreateAnd(NSh,
413 ConstantInt::get(I.getContext(), MaskV),
414 TI->getName());
415
416 // Return the value truncated to the interesting size.
417 return new TruncInst(And, I.getType());
418 }
419 }
420
421 if (Op0->hasOneUse()) {
422 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
423 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
424 Value *V1, *V2;
425 ConstantInt *CC;
426 switch (Op0BO->getOpcode()) {
427 default: break;
428 case Instruction::Add:
429 case Instruction::And:
430 case Instruction::Or:
431 case Instruction::Xor: {
432 // These operators commute.
433 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
434 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
435 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
436 m_Specific(Op1)))) {
437 Value *YS = // (Y << C)
438 Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
439 // (X + (Y << C))
440 Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), YS, V1,
441 Op0BO->getOperand(1)->getName());
442 unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
443
444 APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
445 Constant *Mask = ConstantInt::get(I.getContext(), Bits);
446 if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
447 Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
448 return BinaryOperator::CreateAnd(X, Mask);
449 }
450
451 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
452 Value *Op0BOOp1 = Op0BO->getOperand(1);
453 if (isLeftShift && Op0BOOp1->hasOneUse() &&
454 match(Op0BOOp1,
455 m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
456 m_ConstantInt(CC)))) {
457 Value *YS = // (Y << C)
458 Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
459 // X & (CC << C)
460 Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
461 V1->getName()+".mask");
462 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
463 }
464 LLVM_FALLTHROUGH[[clang::fallthrough]];
465 }
466
467 case Instruction::Sub: {
468 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
469 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
470 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
471 m_Specific(Op1)))) {
472 Value *YS = // (Y << C)
473 Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
474 // (X + (Y << C))
475 Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), V1, YS,
476 Op0BO->getOperand(0)->getName());
477 unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
478
479 APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
480 Constant *Mask = ConstantInt::get(I.getContext(), Bits);
481 if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
482 Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
483 return BinaryOperator::CreateAnd(X, Mask);
484 }
485
486 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
487 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
488 match(Op0BO->getOperand(0),
489 m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))),
490 m_ConstantInt(CC))) && V2 == Op1) {
491 Value *YS = // (Y << C)
492 Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
493 // X & (CC << C)
494 Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
495 V1->getName()+".mask");
496
497 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
498 }
499
500 break;
501 }
502 }
503
504
505 // If the operand is a bitwise operator with a constant RHS, and the
506 // shift is the only use, we can pull it out of the shift.
507 const APInt *Op0C;
508 if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
509 if (canShiftBinOpWithConstantRHS(I, Op0BO, *Op0C)) {
510 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
511 cast<Constant>(Op0BO->getOperand(1)), Op1);
512
513 Value *NewShift =
514 Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
515 NewShift->takeName(Op0BO);
516
517 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
518 NewRHS);
519 }
520 }
521
522 // If the operand is a subtract with a constant LHS, and the shift
523 // is the only use, we can pull it out of the shift.
524 // This folds (shl (sub C1, X), C2) -> (sub (C1 << C2), (shl X, C2))
525 if (isLeftShift && Op0BO->getOpcode() == Instruction::Sub &&
526 match(Op0BO->getOperand(0), m_APInt(Op0C))) {
527 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
528 cast<Constant>(Op0BO->getOperand(0)), Op1);
529
530 Value *NewShift = Builder.CreateShl(Op0BO->getOperand(1), Op1);
531 NewShift->takeName(Op0BO);
532
533 return BinaryOperator::CreateSub(NewRHS, NewShift);
534 }
535 }
536
537 // If we have a select that conditionally executes some binary operator,
538 // see if we can pull it the select and operator through the shift.
539 //
540 // For example, turning:
541 // shl (select C, (add X, C1), X), C2
542 // Into:
543 // Y = shl X, C2
544 // select C, (add Y, C1 << C2), Y
545 Value *Cond;
546 BinaryOperator *TBO;
547 Value *FalseVal;
548 if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
549 m_Value(FalseVal)))) {
550 const APInt *C;
551 if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
552 match(TBO->getOperand(1), m_APInt(C)) &&
553 canShiftBinOpWithConstantRHS(I, TBO, *C)) {
554 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
555 cast<Constant>(TBO->getOperand(1)), Op1);
556
557 Value *NewShift =
558 Builder.CreateBinOp(I.getOpcode(), FalseVal, Op1);
559 Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift,
560 NewRHS);
561 return SelectInst::Create(Cond, NewOp, NewShift);
562 }
563 }
564
565 BinaryOperator *FBO;
566 Value *TrueVal;
567 if (match(Op0, m_Select(m_Value(Cond), m_Value(TrueVal),
568 m_OneUse(m_BinOp(FBO))))) {
569 const APInt *C;
570 if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
571 match(FBO->getOperand(1), m_APInt(C)) &&
572 canShiftBinOpWithConstantRHS(I, FBO, *C)) {
573 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
574 cast<Constant>(FBO->getOperand(1)), Op1);
575
576 Value *NewShift =
577 Builder.CreateBinOp(I.getOpcode(), TrueVal, Op1);
578 Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift,
579 NewRHS);
580 return SelectInst::Create(Cond, NewShift, NewOp);
581 }
582 }
583 }
584
585 return nullptr;
586}
587
588Instruction *InstCombiner::visitShl(BinaryOperator &I) {
589 if (Value *V = SimplifyVectorOp(I))
590 return replaceInstUsesWith(I, V);
591
592 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
593 if (Value *V =
594 SimplifyShlInst(Op0, Op1, I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
595 SQ.getWithInstruction(&I)))
596 return replaceInstUsesWith(I, V);
597
598 if (Instruction *V = commonShiftTransforms(I))
599 return V;
600
601 const APInt *ShAmtAPInt;
602 if (match(Op1, m_APInt(ShAmtAPInt))) {
603 unsigned ShAmt = ShAmtAPInt->getZExtValue();
604 unsigned BitWidth = I.getType()->getScalarSizeInBits();
605 Type *Ty = I.getType();
606
607 // shl (zext X), ShAmt --> zext (shl X, ShAmt)
608 // This is only valid if X would have zeros shifted out.
609 Value *X;
610 if (match(Op0, m_ZExt(m_Value(X)))) {
611 unsigned SrcWidth = X->getType()->getScalarSizeInBits();
612 if (ShAmt < SrcWidth &&
613 MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I))
614 return new ZExtInst(Builder.CreateShl(X, ShAmt), Ty);
615 }
616
617 // (X >> C) << C --> X & (-1 << C)
618 if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1)))) {
619 APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
620 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
621 }
622
623 // Be careful about hiding shl instructions behind bit masks. They are used
624 // to represent multiplies by a constant, and it is important that simple
625 // arithmetic expressions are still recognizable by scalar evolution.
626 // The inexact versions are deferred to DAGCombine, so we don't hide shl
627 // behind a bit mask.
628 const APInt *ShOp1;
629 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(ShOp1))))) {
630 unsigned ShrAmt = ShOp1->getZExtValue();
631 if (ShrAmt < ShAmt) {
632 // If C1 < C2: (X >>?,exact C1) << C2 --> X << (C2 - C1)
633 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShrAmt);
634 auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
635 NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
636 NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
637 return NewShl;
638 }
639 if (ShrAmt > ShAmt) {
640 // If C1 > C2: (X >>?exact C1) << C2 --> X >>?exact (C1 - C2)
641 Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmt);
642 auto *NewShr = BinaryOperator::Create(
643 cast<BinaryOperator>(Op0)->getOpcode(), X, ShiftDiff);
644 NewShr->setIsExact(true);
645 return NewShr;
646 }
647 }
648
649 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
650 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
651 // Oversized shifts are simplified to zero in InstSimplify.
652 if (AmtSum < BitWidth)
653 // (X << C1) << C2 --> X << (C1 + C2)
654 return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
655 }
656
657 // If the shifted-out value is known-zero, then this is a NUW shift.
658 if (!I.hasNoUnsignedWrap() &&
659 MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmt), 0, &I)) {
660 I.setHasNoUnsignedWrap();
661 return &I;
662 }
663
664 // If the shifted-out value is all signbits, then this is a NSW shift.
665 if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmt) {
666 I.setHasNoSignedWrap();
667 return &I;
668 }
669 }
670
671 Constant *C1;
672 if (match(Op1, m_Constant(C1))) {
673 Constant *C2;
674 Value *X;
675 // (C2 << X) << C1 --> (C2 << C1) << X
676 if (match(Op0, m_OneUse(m_Shl(m_Constant(C2), m_Value(X)))))
677 return BinaryOperator::CreateShl(ConstantExpr::getShl(C2, C1), X);
678
679 // (X * C2) << C1 --> X * (C2 << C1)
680 if (match(Op0, m_Mul(m_Value(X), m_Constant(C2))))
681 return BinaryOperator::CreateMul(X, ConstantExpr::getShl(C2, C1));
682 }
683
684 return nullptr;
685}
686
687Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
688 if (Value *V = SimplifyVectorOp(I))
689 return replaceInstUsesWith(I, V);
690
691 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
692 if (Value *V =
693 SimplifyLShrInst(Op0, Op1, I.isExact(), SQ.getWithInstruction(&I)))
694 return replaceInstUsesWith(I, V);
695
696 if (Instruction *R = commonShiftTransforms(I))
697 return R;
698
699 Type *Ty = I.getType();
700 const APInt *ShAmtAPInt;
701 if (match(Op1, m_APInt(ShAmtAPInt))) {
702 unsigned ShAmt = ShAmtAPInt->getZExtValue();
703 unsigned BitWidth = Ty->getScalarSizeInBits();
704 auto *II = dyn_cast<IntrinsicInst>(Op0);
705 if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt &&
706 (II->getIntrinsicID() == Intrinsic::ctlz ||
707 II->getIntrinsicID() == Intrinsic::cttz ||
708 II->getIntrinsicID() == Intrinsic::ctpop)) {
709 // ctlz.i32(x)>>5 --> zext(x == 0)
710 // cttz.i32(x)>>5 --> zext(x == 0)
711 // ctpop.i32(x)>>5 --> zext(x == -1)
712 bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
713 Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
714 Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
715 return new ZExtInst(Cmp, Ty);
716 }
717
718 Value *X;
719 const APInt *ShOp1;
720 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
721 unsigned ShlAmt = ShOp1->getZExtValue();
722 if (ShlAmt < ShAmt) {
723 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
724 if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
725 // (X <<nuw C1) >>u C2 --> X >>u (C2 - C1)
726 auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
727 NewLShr->setIsExact(I.isExact());
728 return NewLShr;
729 }
730 // (X << C1) >>u C2 --> (X >>u (C2 - C1)) & (-1 >> C2)
731 Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
732 APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
733 return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
734 }
735 if (ShlAmt > ShAmt) {
736 Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
737 if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
738 // (X <<nuw C1) >>u C2 --> X <<nuw (C1 - C2)
739 auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
740 NewShl->setHasNoUnsignedWrap(true);
741 return NewShl;
742 }
743 // (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
744 Value *NewShl = Builder.CreateShl(X, ShiftDiff);
745 APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
746 return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
747 }
748 assert(ShlAmt == ShAmt)(static_cast <bool> (ShlAmt == ShAmt) ? void (0) : __assert_fail
("ShlAmt == ShAmt", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 748, __extension__ __PRETTY_FUNCTION__))
;
749 // (X << C) >>u C --> X & (-1 >>u C)
750 APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
751 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
752 }
753
754 if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
755 (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
756 assert(ShAmt < X->getType()->getScalarSizeInBits() &&(static_cast <bool> (ShAmt < X->getType()->getScalarSizeInBits
() && "Big shift not simplified to zero?") ? void (0)
: __assert_fail ("ShAmt < X->getType()->getScalarSizeInBits() && \"Big shift not simplified to zero?\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 757, __extension__ __PRETTY_FUNCTION__))
757 "Big shift not simplified to zero?")(static_cast <bool> (ShAmt < X->getType()->getScalarSizeInBits
() && "Big shift not simplified to zero?") ? void (0)
: __assert_fail ("ShAmt < X->getType()->getScalarSizeInBits() && \"Big shift not simplified to zero?\""
, "/build/llvm-toolchain-snapshot-7~svn326246/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 757, __extension__ __PRETTY_FUNCTION__))
;
758 // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
759 Value *NewLShr = Builder.CreateLShr(X, ShAmt);
760 return new ZExtInst(NewLShr, Ty);
761 }
762
763 if (match(Op0, m_SExt(m_Value(X))) &&
764 (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
765 // Are we moving the sign bit to the low bit and widening with high zeros?
766 unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
767 if (ShAmt == BitWidth - 1) {
768 // lshr (sext i1 X to iN), N-1 --> zext X to iN
769 if (SrcTyBitWidth == 1)
770 return new ZExtInst(X, Ty);
771
772 // lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
773 if (Op0->hasOneUse()) {
774 Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
775 return new ZExtInst(NewLShr, Ty);
776 }
777 }
778
779 // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
780 if (ShAmt == BitWidth - SrcTyBitWidth && Op0->hasOneUse()) {
781 // The new shift amount can't be more than the narrow source type.
782 unsigned NewShAmt = std::min(ShAmt, SrcTyBitWidth - 1);
783 Value *AShr = Builder.CreateAShr(X, NewShAmt);
784 return new ZExtInst(AShr, Ty);
785 }
786 }
787
788 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShOp1)))) {
789 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
790 // Oversized shifts are simplified to zero in InstSimplify.
791 if (AmtSum < BitWidth)
792 // (X >>u C1) >>u C2 --> X >>u (C1 + C2)
793 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
794 }
795
796 // If the shifted-out value is known-zero, then this is an exact shift.
797 if (!I.isExact() &&
798 MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
799 I.setIsExact();
800 return &I;
801 }
802 }
803 return nullptr;
804}
805
806Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
807 if (Value *V = SimplifyVectorOp(I))
808 return replaceInstUsesWith(I, V);
809
810 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
811 if (Value *V =
812 SimplifyAShrInst(Op0, Op1, I.isExact(), SQ.getWithInstruction(&I)))
813 return replaceInstUsesWith(I, V);
814
815 if (Instruction *R = commonShiftTransforms(I))
816 return R;
817
818 Type *Ty = I.getType();
819 unsigned BitWidth = Ty->getScalarSizeInBits();
820 const APInt *ShAmtAPInt;
821 if (match(Op1, m_APInt(ShAmtAPInt)) && ShAmtAPInt->ult(BitWidth)) {
822 unsigned ShAmt = ShAmtAPInt->getZExtValue();
823
824 // If the shift amount equals the difference in width of the destination
825 // and source scalar types:
826 // ashr (shl (zext X), C), C --> sext X
827 Value *X;
828 if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
829 ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
830 return new SExtInst(X, Ty);
831
832 // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
833 // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
834 const APInt *ShOp1;
835 if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1))) &&
836 ShOp1->ult(BitWidth)) {
837 unsigned ShlAmt = ShOp1->getZExtValue();
838 if (ShlAmt < ShAmt) {
839 // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
840 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
841 auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
842 NewAShr->setIsExact(I.isExact());
843 return NewAShr;
844 }
845 if (ShlAmt > ShAmt) {
846 // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
847 Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
848 auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
849 NewShl->setHasNoSignedWrap(true);
850 return NewShl;
851 }
852 }
853
854 if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1))) &&
855 ShOp1->ult(BitWidth)) {
856 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
857 // Oversized arithmetic shifts replicate the sign bit.
858 AmtSum = std::min(AmtSum, BitWidth - 1);
859 // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
860 return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
861 }
862
863 if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
864 (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
865 // ashr (sext X), C --> sext (ashr X, C')
866 Type *SrcTy = X->getType();
867 ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
868 Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
869 return new SExtInst(NewSh, Ty);
870 }
871
872 // If the shifted-out value is known-zero, then this is an exact shift.
873 if (!I.isExact() &&
874 MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
875 I.setIsExact();
876 return &I;
877 }
878 }
879
880 // See if we can turn a signed shr into an unsigned shr.
881 if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I))
882 return BinaryOperator::CreateLShr(Op0, Op1);
883
884 return nullptr;
885}

/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file provides a simple and efficient mechanism for performing general
11// tree-based pattern matches on the LLVM IR. The power of these routines is
12// that it allows you to write concise patterns that are expressive and easy to
13// understand. The other major advantage of this is that it allows you to
14// trivially capture/bind elements in the pattern to variables. For example,
15// you can do something like this:
16//
17// Value *Exp = ...
18// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
19// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
20// m_And(m_Value(Y), m_ConstantInt(C2))))) {
21// ... Pattern is matched and variables are bound ...
22// }
23//
24// This is primarily useful to things like the instruction combiner, but can
25// also be useful for static analysis tools or code generators.
26//
27//===----------------------------------------------------------------------===//
28
29#ifndef LLVM_IR_PATTERNMATCH_H
30#define LLVM_IR_PATTERNMATCH_H
31
32#include "llvm/ADT/APFloat.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/IR/CallSite.h"
35#include "llvm/IR/Constant.h"
36#include "llvm/IR/Constants.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Instruction.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include <cstdint>
45
46namespace llvm {
47namespace PatternMatch {
48
/// Entry point of the pattern-match DSL: apply pattern \p P to value \p V.
/// On success, any capture slots embedded in the pattern are bound. Patterns
/// are passed by const-ref for convenience; matching mutates their captures,
/// hence the const_cast.
template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  return const_cast<Pattern &>(P).match(V);
}
52
/// Matcher that succeeds only when the value has exactly one use and the
/// wrapped sub-pattern matches it as well.
template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Check the cheap use-count test before recursing into the sub-pattern.
    return V->hasOneUse() && SubPattern.match(V);
  }
};

/// Wrap a pattern so it additionally requires the matched value to have a
/// single use.
template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
  return SubPattern;
}
66
67template <typename Class> struct class_match {
68 template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
69};
70
71/// Match an arbitrary value and ignore it.
72inline class_match<Value> m_Value() { return class_match<Value>(); }
73
74/// Match an arbitrary binary operation and ignore it.
75inline class_match<BinaryOperator> m_BinOp() {
76 return class_match<BinaryOperator>();
77}
78
79/// Matches any compare instruction and ignore it.
80inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
81
82/// Match an arbitrary ConstantInt and ignore it.
83inline class_match<ConstantInt> m_ConstantInt() {
84 return class_match<ConstantInt>();
85}
86
87/// Match an arbitrary undef constant.
88inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
89
90/// Match an arbitrary Constant and ignore it.
91inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
92
/// Matching combinators: disjunction of two sub-patterns. The left pattern
/// is tried first; the right one only runs if the left fails.
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    return L.match(V) || R.match(V);
  }
};

/// Conjunction of two sub-patterns: both must match the same value.
template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    return L.match(V) && R.match(V);
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}
134
135struct match_zero {
136 template <typename ITy> bool match(ITy *V) {
137 if (const auto *C = dyn_cast<Constant>(V))
138 return C->isNullValue();
139 return false;
140 }
141};
142
143/// Match an arbitrary zero/null constant. This includes
144/// zero_initializer for vectors and ConstantPointerNull for pointers.
145inline match_zero m_Zero() { return match_zero(); }
146
147struct match_neg_zero {
148 template <typename ITy> bool match(ITy *V) {
149 if (const auto *C = dyn_cast<Constant>(V))
150 return C->isNegativeZeroValue();
151 return false;
152 }
153};
154
155/// Match an arbitrary zero/null constant. This includes
156/// zero_initializer for vectors and ConstantPointerNull for pointers. For
157/// floating point constants, this will match negative zero but not positive
158/// zero
159inline match_neg_zero m_NegZero() { return match_neg_zero(); }
160
161struct match_any_zero {
162 template <typename ITy> bool match(ITy *V) {
163 if (const auto *C = dyn_cast<Constant>(V))
164 return C->isZeroValue();
165 return false;
166 }
167};
168
169/// Match an arbitrary zero/null constant. This includes
170/// zero_initializer for vectors and ConstantPointerNull for pointers. For
171/// floating point constants, this will match negative zero and positive zero
172inline match_any_zero m_AnyZero() { return match_any_zero(); }
173
174struct match_nan {
175 template <typename ITy> bool match(ITy *V) {
176 if (const auto *C = dyn_cast<ConstantFP>(V))
177 return C->isNaN();
178 return false;
179 }
180};
181
182/// Match an arbitrary NaN constant. This includes quiet and signalling nans.
183inline match_nan m_NaN() { return match_nan(); }
184
185struct apint_match {
186 const APInt *&Res;
187
188 apint_match(const APInt *&R) : Res(R) {}
19
Returning without writing to 'R'
189
190 template <typename ITy> bool match(ITy *V) {
191 if (auto *CI = dyn_cast<ConstantInt>(V)) {
192 Res = &CI->getValue();
193 return true;
194 }
195 if (V->getType()->isVectorTy())
196 if (const auto *C = dyn_cast<Constant>(V))
197 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
198 Res = &CI->getValue();
199 return true;
200 }
201 return false;
202 }
203};
204// Either constexpr if or renaming ConstantFP::getValueAPF to
205// ConstantFP::getValue is needed to do it via single template
206// function for both apint/apfloat.
207struct apfloat_match {
208 const APFloat *&Res;
209 apfloat_match(const APFloat *&R) : Res(R) {}
210 template <typename ITy> bool match(ITy *V) {
211 if (auto *CI = dyn_cast<ConstantFP>(V)) {
212 Res = &CI->getValueAPF();
213 return true;
214 }
215 if (V->getType()->isVectorTy())
216 if (const auto *C = dyn_cast<Constant>(V))
217 if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) {
218 Res = &CI->getValueAPF();
219 return true;
220 }
221 return false;
222 }
223};
224
225/// Match a ConstantInt or splatted ConstantVector, binding the
226/// specified pointer to the contained APInt.
227inline apint_match m_APInt(const APInt *&Res) { return Res; }
18
Calling constructor for 'apint_match'
20
Returning from constructor for 'apint_match'
21
Returning without writing to 'Res'
228
229/// Match a ConstantFP or splatted ConstantVector, binding the
230/// specified pointer to the contained APFloat.
231inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; }
232
233template <int64_t Val> struct constantint_match {
234 template <typename ITy> bool match(ITy *V) {
235 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
236 const APInt &CIV = CI->getValue();
237 if (Val >= 0)
238 return CIV == static_cast<uint64_t>(Val);
239 // If Val is negative, and CI is shorter than it, truncate to the right
240 // number of bits. If it is larger, then we have to sign extend. Just
241 // compare their negated values.
242 return -CIV == -Val;
243 }
244 return false;
245 }
246};
247
248/// Match a ConstantInt with a specific value.
249template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
250 return constantint_match<Val>();
251}
252
253/// This helper class is used to match scalar and vector constants that satisfy
254/// a specified predicate. For vector constants, undefined elements are ignored.
255template <typename Predicate> struct cst_pred_ty : public Predicate {
256 template <typename ITy> bool match(ITy *V) {
257 if (const auto *CI = dyn_cast<ConstantInt>(V))
258 return this->isValue(CI->getValue());
259 if (V->getType()->isVectorTy()) {
260 if (const auto *C = dyn_cast<Constant>(V)) {
261 if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
262 return this->isValue(CI->getValue());
263
264 // Non-splat vector constant: check each element for a match.
265 unsigned NumElts = V->getType()->getVectorNumElements();
266 assert(NumElts != 0 && "Constant vector with no elements?")(static_cast <bool> (NumElts != 0 && "Constant vector with no elements?"
) ? void (0) : __assert_fail ("NumElts != 0 && \"Constant vector with no elements?\""
, "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/IR/PatternMatch.h"
, 266, __extension__ __PRETTY_FUNCTION__))
;
267 for (unsigned i = 0; i != NumElts; ++i) {
268 Constant *Elt = C->getAggregateElement(i);
269 if (!Elt)
270 return false;
271 if (isa<UndefValue>(Elt))
272 continue;
273 auto *CI = dyn_cast<ConstantInt>(Elt);
274 if (!CI || !this->isValue(CI->getValue()))
275 return false;
276 }
277 return true;
278 }
279 }
280 return false;
281 }
282};
283
284/// This helper class is used to match scalar and vector constants that
285/// satisfy a specified predicate, and bind them to an APInt.
286template <typename Predicate> struct api_pred_ty : public Predicate {
287 const APInt *&Res;
288
289 api_pred_ty(const APInt *&R) : Res(R) {}
290
291 template <typename ITy> bool match(ITy *V) {
292 if (const auto *CI = dyn_cast<ConstantInt>(V))
293 if (this->isValue(CI->getValue())) {
294 Res = &CI->getValue();
295 return true;
296 }
297 if (V->getType()->isVectorTy())
298 if (const auto *C = dyn_cast<Constant>(V))
299 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
300 if (this->isValue(CI->getValue())) {
301 Res = &CI->getValue();
302 return true;
303 }
304
305 return false;
306 }
307};
308
309///////////////////////////////////////////////////////////////////////////////
310//
311// Encapsulate constant value queries for use in templated predicate matchers.
312// This allows checking if constants match using compound predicates and works
313// with vector constants, possibly with relaxed constraints. For example, ignore
314// undef values.
315//
316///////////////////////////////////////////////////////////////////////////////
317
318struct is_all_ones {
319 bool isValue(const APInt &C) { return C.isAllOnesValue(); }
320};
321/// Match an integer or vector with all bits set.
322inline cst_pred_ty<is_all_ones> m_AllOnes() {
323 return cst_pred_ty<is_all_ones>();
324}
325
326struct is_maxsignedvalue {
327 bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
328};
329/// Match an integer or vector with values having all bits except for the high
330/// bit set (0x7f...).
331inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
332 return cst_pred_ty<is_maxsignedvalue>();
333}
334inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
335 return V;
336}
337
338struct is_negative {
339 bool isValue(const APInt &C) { return C.isNegative(); }
340};
341/// Match an integer or vector of negative values.
342inline cst_pred_ty<is_negative> m_Negative() {
343 return cst_pred_ty<is_negative>();
344}
345inline api_pred_ty<is_negative> m_Negative(const APInt *&V) {
346 return V;
347}
348
349struct is_nonnegative {
350 bool isValue(const APInt &C) { return C.isNonNegative(); }
351};
352/// Match an integer or vector of nonnegative values.
353inline cst_pred_ty<is_nonnegative> m_NonNegative() {
354 return cst_pred_ty<is_nonnegative>();
355}
356inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) {
357 return V;
358}
359
360struct is_one {
361 bool isValue(const APInt &C) { return C.isOneValue(); }
362};
363/// Match an integer 1 or a vector with all elements equal to 1.
364inline cst_pred_ty<is_one> m_One() {
365 return cst_pred_ty<is_one>();
366}
367
368struct is_power2 {
369 bool isValue(const APInt &C) { return C.isPowerOf2(); }
370};
371/// Match an integer or vector power-of-2.
372inline cst_pred_ty<is_power2> m_Power2() {
373 return cst_pred_ty<is_power2>();
374}
375inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
376 return V;
377}
378
379struct is_power2_or_zero {
380 bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
381};
382/// Match an integer or vector of 0 or power-of-2 values.
383inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
384 return cst_pred_ty<is_power2_or_zero>();
385}
386inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
387 return V;
388}
389
390struct is_sign_mask {
391 bool isValue(const APInt &C) { return C.isSignMask(); }
392};
393/// Match an integer or vector with only the sign bit(s) set.
394inline cst_pred_ty<is_sign_mask> m_SignMask() {
395 return cst_pred_ty<is_sign_mask>();
396}
397
398///////////////////////////////////////////////////////////////////////////////
399
400template <typename Class> struct bind_ty {
401 Class *&VR;
402
403 bind_ty(Class *&V) : VR(V) {}
404
405 template <typename ITy> bool match(ITy *V) {
406 if (auto *CV = dyn_cast<Class>(V)) {
407 VR = CV;
408 return true;
409 }
410 return false;
411 }
412};
413
414/// Match a value, capturing it if we match.
415inline bind_ty<Value> m_Value(Value *&V) { return V; }
416inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
417
418/// Match an instruction, capturing it if we match.
419inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
420/// Match a binary operator, capturing it if we match.
421inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
422
423/// Match a ConstantInt, capturing the value if we match.
424inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
425
426/// Match a Constant, capturing the value if we match.
427inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
428
429/// Match a ConstantFP, capturing the value if we match.
430inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
431
432/// Match a specified Value*.
433struct specificval_ty {
434 const Value *Val;
435
436 specificval_ty(const Value *V) : Val(V) {}
437
438 template <typename ITy> bool match(ITy *V) { return V == Val; }
439};
440
441/// Match if we have a specific specified value.
442inline specificval_ty m_Specific(const Value *V) { return V; }
443
444/// Match a specified floating point value or vector of all elements of
445/// that value.
446struct specific_fpval {
447 double Val;
448
449 specific_fpval(double V) : Val(V) {}
450
451 template <typename ITy> bool match(ITy *V) {
452 if (const auto *CFP = dyn_cast<ConstantFP>(V))
453 return CFP->isExactlyValue(Val);
454 if (V->getType()->isVectorTy())
455 if (const auto *C = dyn_cast<Constant>(V))
456 if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
457 return CFP->isExactlyValue(Val);
458 return false;
459 }
460};
461
462/// Match a specific floating point value or vector with all elements
463/// equal to the value.
464inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
465
466/// Match a float 1.0 or vector with all elements equal to 1.0.
467inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
468
469struct bind_const_intval_ty {
470 uint64_t &VR;
471
472 bind_const_intval_ty(uint64_t &V) : VR(V) {}
473
474 template <typename ITy> bool match(ITy *V) {
475 if (const auto *CV = dyn_cast<ConstantInt>(V))
476 if (CV->getValue().ule(UINT64_MAX(18446744073709551615UL))) {
477 VR = CV->getZExtValue();
478 return true;
479 }
480 return false;
481 }
482};
483
484/// Match a specified integer value or vector of all elements of that
485// value.
486struct specific_intval {
487 uint64_t Val;
488
489 specific_intval(uint64_t V) : Val(V) {}
490
491 template <typename ITy> bool match(ITy *V) {
492 const auto *CI = dyn_cast<ConstantInt>(V);
493 if (!CI && V->getType()->isVectorTy())
494 if (const auto *C = dyn_cast<Constant>(V))
495 CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
496
497 return CI && CI->getValue() == Val;
498 }
499};
500
501/// Match a specific integer value or vector with all elements equal to
502/// the value.
503inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }
504
505/// Match a ConstantInt and bind to its value. This does not match
506/// ConstantInts wider than 64-bits.
507inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
508
509//===----------------------------------------------------------------------===//
510// Matcher for any binary operator.
511//
512template <typename LHS_t, typename RHS_t, bool Commutable = false>
513struct AnyBinaryOp_match {
514 LHS_t L;
515 RHS_t R;
516
517 AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
518
519 template <typename OpTy> bool match(OpTy *V) {
520 if (auto *I = dyn_cast<BinaryOperator>(V))
521 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
522 (Commutable && R.match(I->getOperand(0)) &&
523 L.match(I->getOperand(1)));
524 return false;
525 }
526};
527
528template <typename LHS, typename RHS>
529inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
530 return AnyBinaryOp_match<LHS, RHS>(L, R);
531}
532
533//===----------------------------------------------------------------------===//
534// Matchers for specific binary operators.
535//
536
537template <typename LHS_t, typename RHS_t, unsigned Opcode,
538 bool Commutable = false>
539struct BinaryOp_match {
540 LHS_t L;
541 RHS_t R;
542
543 BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
544
545 template <typename OpTy> bool match(OpTy *V) {
546 if (V->getValueID() == Value::InstructionVal + Opcode) {
547 auto *I = cast<BinaryOperator>(V);
548 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
549 (Commutable && R.match(I->getOperand(0)) &&
550 L.match(I->getOperand(1)));
551 }
552 if (auto *CE = dyn_cast<ConstantExpr>(V))
553 return CE->getOpcode() == Opcode &&
554 ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
555 (Commutable && R.match(CE->getOperand(0)) &&
556 L.match(CE->getOperand(1))));
557 return false;
558 }
559};
560
561template <typename LHS, typename RHS>
562inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
563 const RHS &R) {
564 return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
565}
566
567template <typename LHS, typename RHS>
568inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
569 const RHS &R) {
570 return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
571}
572
573template <typename LHS, typename RHS>
574inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
575 const RHS &R) {
576 return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
577}
578
579template <typename LHS, typename RHS>
580inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
581 const RHS &R) {
582 return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
583}
584
585template <typename LHS, typename RHS>
586inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
587 const RHS &R) {
588 return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
589}
590
591template <typename LHS, typename RHS>
592inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
593 const RHS &R) {
594 return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
595}
596
597template <typename LHS, typename RHS>
598inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
599 const RHS &R) {
600 return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
601}
602
/// Matches a signed integer division whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
}

/// Matches a floating-point division whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
}

/// Matches an unsigned remainder whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
}

/// Matches a signed remainder whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
}

/// Matches a floating-point remainder whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
}

/// Matches a bitwise 'and' whose operands match L and R (in this order only;
/// see m_c_And for the commutative variant).
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
}

/// Matches a bitwise 'or' whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
                                                      const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
}

/// Matches a bitwise 'xor' whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
}

/// Matches a left shift whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
}

/// Matches a logical (zero-filling) right shift whose operands match L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
}

/// Matches an arithmetic (sign-extending) right shift whose operands match
/// L and R.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
}
668
/// Matches an overflowing binary operator (add/sub/mul/shl) with a specific
/// opcode and, optionally, a required set of wrap flags (nuw/nsw).
template <typename LHS_t, typename RHS_t, unsigned Opcode,
          unsigned WrapFlags = 0>
struct OverflowingBinaryOp_match {
  LHS_t L; // Matcher applied to operand 0.
  RHS_t R; // Matcher applied to operand 1.

  OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
      : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
      if (Op->getOpcode() != Opcode)
        return false;
      // Every wrap flag requested via WrapFlags must actually be present on
      // the operation; extra flags on the operation are allowed.
      if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
          !Op->hasNoUnsignedWrap())
        return false;
      if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
          !Op->hasNoSignedWrap())
        return false;
      return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
    }
    return false;
  }
};
693
/// Matches an Add that carries the 'nsw' (no signed wrap) flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
/// Matches a Sub that carries the 'nsw' flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
/// Matches a Mul that carries the 'nsw' flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
/// Matches a Shl that carries the 'nsw' flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}

/// Matches an Add that carries the 'nuw' (no unsigned wrap) flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
/// Matches a Sub that carries the 'nuw' flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
/// Matches a Mul that carries the 'nuw' flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
/// Matches a Shl that carries the 'nuw' flag.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
759
760//===----------------------------------------------------------------------===//
761// Class that matches a group of binary opcodes.
762//
/// Matches a binary operation — either a real Instruction or a ConstantExpr —
/// whose opcode is accepted by the Predicate policy class and whose operands
/// match L and R.
template <typename LHS_t, typename RHS_t, typename Predicate>
struct BinOpPred_match : Predicate {
  LHS_t L;
  RHS_t R;

  BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // isOpType() is provided by the Predicate base (e.g. is_shift_op).
    if (auto *I = dyn_cast<Instruction>(V))
      return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
             R.match(I->getOperand(1));
    // Constant expressions with the same opcode are matched as well.
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
             R.match(CE->getOperand(1));
    return false;
  }
};
780
/// Opcode predicate: any shift (Shl, LShr, AShr).
struct is_shift_op {
  bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
};

/// Opcode predicate: right shifts only (LShr or AShr).
struct is_right_shift_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
  }
};

/// Opcode predicate: logical (zero-filling) shifts only (LShr or Shl).
struct is_logical_shift_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
  }
};

/// Opcode predicate: bitwise logic (And, Or, Xor).
struct is_bitwiselogic_op {
  bool isOpType(unsigned Opcode) {
    return Instruction::isBitwiseLogicOp(Opcode);
  }
};

/// Opcode predicate: integer division (SDiv or UDiv).
struct is_idiv_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
  }
};
808
/// Matches shift operations (Shl, LShr, AShr).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
                                                      const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
}

/// Matches right-shift operations (LShr or AShr — logical or arithmetic).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
                                                          const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
}

/// Matches logical shift operations (Shl or LShr).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
m_LogicalShift(const LHS &L, const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
}

/// Matches bitwise logic operations (And, Or, Xor).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
m_BitwiseLogic(const LHS &L, const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
}

/// Matches integer division operations (SDiv or UDiv).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
                                                    const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
}
843
844//===----------------------------------------------------------------------===//
845// Class that matches exact binary ops.
846//
/// Matches a PossiblyExactOperator that has the 'exact' flag set and whose
/// whole value also satisfies the wrapped sub-pattern.
template <typename SubPattern_t> struct Exact_match {
  SubPattern_t SubPattern;

  Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
      // The sub-pattern sees the operation itself, not an operand.
      return PEO->isExact() && SubPattern.match(V);
    return false;
  }
};

/// Matches an exact binary operation (e.g. 'sdiv exact').
template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
  // Implicit conversion through Exact_match's converting constructor.
  return SubPattern;
}
862
863//===----------------------------------------------------------------------===//
864// Matchers for CmpInst classes
865//
866
/// Matches a compare of class \c Class, capturing its predicate into a
/// caller-provided lvalue when the match succeeds.
template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
          bool Commutable = false>
struct CmpClass_match {
  // NOTE(review): this is a *reference* to caller storage, and it is only
  // assigned when match() succeeds. The referenced variable stays whatever it
  // was (possibly uninitialized) on failure — callers must not read it unless
  // match() returned true.
  PredicateTy &Predicate;
  LHS_t L;
  RHS_t R;

  CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
      : Predicate(Pred), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<Class>(V))
      // Try the operands in order; if Commutable, also try them swapped.
      // The predicate itself is NOT inverted when the swapped form matches.
      if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
          (Commutable && R.match(I->getOperand(0)) &&
           L.match(I->getOperand(1)))) {
        Predicate = I->getPredicate();
        return true;
      }
    return false;
  }
};

/// Matches any compare instruction (integer or floating point), capturing
/// its predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
}

/// Matches an integer compare, capturing its predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
}

/// Matches a floating-point compare, capturing its predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
}
906
907//===----------------------------------------------------------------------===//
908// Matchers for SelectInst classes
909//
910
/// Matches a SelectInst whose condition, true value and false value match
/// C, L and R respectively.
template <typename Cond_t, typename LHS_t, typename RHS_t>
struct SelectClass_match {
  Cond_t C;
  LHS_t L;
  RHS_t R;

  SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
      : C(Cond), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Operand order on SelectInst: 0 = condition, 1 = true, 2 = false.
    if (auto *I = dyn_cast<SelectInst>(V))
      return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
             R.match(I->getOperand(2));
    return false;
  }
};

/// Matches a select instruction: 'select C, L, R'.
template <typename Cond, typename LHS, typename RHS>
inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
                                                  const RHS &R) {
  return SelectClass_match<Cond, LHS, RHS>(C, L, R);
}

/// This matches a select of two constants, e.g.:
/// m_SelectCst<-1, 0>(m_Value(V))
template <int64_t L, int64_t R, typename Cond>
inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
m_SelectCst(const Cond &C) {
  return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
}
941
942//===----------------------------------------------------------------------===//
943// Matchers for CastInst classes
944//
945
946template <typename Op_t, unsigned Opcode> struct CastClass_match {
947 Op_t Op;
948
949 CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
950
951 template <typename OpTy> bool match(OpTy *V) {
952 if (auto *O = dyn_cast<Operator>(V))
953 return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
954 return false;
955 }
956};
957
/// Matches BitCast.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::BitCast>(Op);
}

/// Matches PtrToInt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
}

/// Matches Trunc.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::Trunc>(Op);
}

/// Matches SExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SExt>(Op);
}

/// Matches ZExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::ZExt>(Op);
}

/// Matches either a ZExt or an SExt of the sub-pattern.
template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
                        CastClass_match<OpTy, Instruction::SExt>>
m_ZExtOrSExt(const OpTy &Op) {
  return m_CombineOr(m_ZExt(Op), m_SExt(Op));
}

/// Matches UIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::UIToFP>(Op);
}

/// Matches SIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SIToFP>(Op);
}

/// Matches FPTrunc.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
}

/// Matches FPExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPExt>(Op);
}
1018
1019//===----------------------------------------------------------------------===//
1020// Matcher for LoadInst classes
1021//
1022
/// Matches a LoadInst whose pointer operand matches the wrapped sub-pattern.
template <typename Op_t> struct LoadClass_match {
  Op_t Op;

  LoadClass_match(const Op_t &OpMatch) : Op(OpMatch) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *LI = dyn_cast<LoadInst>(V))
      return Op.match(LI->getPointerOperand());
    return false;
  }
};

/// Matches LoadInst.
template <typename OpTy> inline LoadClass_match<OpTy> m_Load(const OpTy &Op) {
  return LoadClass_match<OpTy>(Op);
}
1039//===----------------------------------------------------------------------===//
1040// Matchers for unary operators
1041//
1042
/// Matches a bitwise 'not': 'xor X, -1' (the all-ones constant may appear on
/// either side of the xor).
template <typename LHS_t> struct not_match {
  LHS_t L;

  not_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::Xor) {
        if (isAllOnes(O->getOperand(1)))
          return L.match(O->getOperand(0));
        if (isAllOnes(O->getOperand(0)))
          return L.match(O->getOperand(1));
      }
    return false;
  }

private:
  // True for any Constant whose value is all ones (isAllOnesValue covers
  // scalars and splat vectors).
  bool isAllOnes(Value *V) {
    return isa<Constant>(V) && cast<Constant>(V)->isAllOnesValue();
  }
};

/// Matches an integer 'not'; uses not_match's converting constructor.
template <typename LHS> inline not_match<LHS> m_Not(const LHS &L) { return L; }
1066
/// Matches an integer negation: 'sub 0, X'.
template <typename LHS_t> struct neg_match {
  LHS_t L;

  neg_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::Sub)
        return matchIfNeg(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  // The subtrahend's LHS must be a zero scalar ConstantInt or a zero
  // aggregate (e.g. a zero vector); the sub-pattern is applied to the RHS.
  bool matchIfNeg(Value *LHS, Value *RHS) {
    return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
            isa<ConstantAggregateZero>(LHS)) &&
           L.match(RHS);
  }
};

/// Match an integer negate; uses neg_match's converting constructor.
template <typename LHS> inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
1089
/// Matches a floating-point negation: 'fsub -0.0, X'.
template <typename LHS_t> struct fneg_match {
  LHS_t L;

  fneg_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::FSub)
        return matchIfFNeg(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  // The LHS must be negative zero (-0.0) — the only constant for which
  // 'fsub C, X' is a true IEEE negation.
  bool matchIfFNeg(Value *LHS, Value *RHS) {
    if (const auto *C = dyn_cast<Constant>(LHS))
      return C->isNegativeZeroValue() && L.match(RHS);
    return false;
  }
};

/// Match a floating point negate; uses fneg_match's converting constructor.
template <typename LHS> inline fneg_match<LHS> m_FNeg(const LHS &L) {
  return L;
}
1114
1115//===----------------------------------------------------------------------===//
1116// Matchers for control flow.
1117//
1118
/// Matches an unconditional branch, capturing its single successor.
struct br_match {
  BasicBlock *&Succ; // Written only when match() succeeds.

  br_match(BasicBlock *&Succ) : Succ(Succ) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isUnconditional()) {
        Succ = BI->getSuccessor(0);
        return true;
      }
    return false;
  }
};

inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }

/// Matches a conditional branch whose condition matches Cond, capturing the
/// true-edge (T) and false-edge (F) successors.
template <typename Cond_t> struct brc_match {
  Cond_t Cond;
  BasicBlock *&T, *&F; // Written only when match() succeeds.

  brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
      : Cond(C), T(t), F(f) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isConditional() && Cond.match(BI->getCondition())) {
        T = BI->getSuccessor(0);
        F = BI->getSuccessor(1);
        return true;
      }
    return false;
  }
};

/// Matches a conditional branch: 'br C, T, F'.
template <typename Cond_t>
inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
  return brc_match<Cond_t>(C, T, F);
}
1158
1159//===----------------------------------------------------------------------===//
1160// Matchers for max/min idioms, eg: "select (sgt x, y), x, y" -> smax(x,y).
1161//
1162
/// Matches select-based min/max idioms such as "select (sgt x, y), x, y"
/// (= smax(x, y)). Pred_t decides which predicates denote the desired
/// operation; Commutable additionally accepts the operands swapped.
template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
          bool Commutable = false>
struct MaxMin_match {
  LHS_t L;
  RHS_t R;

  MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
    auto *SI = dyn_cast<SelectInst>(V);
    if (!SI)
      return false;
    auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
    if (!Cmp)
      return false;
    // At this point we have a select conditioned on a comparison.  Check that
    // it is the values returned by the select that are being compared.
    Value *TrueVal = SI->getTrueValue();
    Value *FalseVal = SI->getFalseValue();
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    if ((TrueVal != LHS || FalseVal != RHS) &&
        (TrueVal != RHS || FalseVal != LHS))
      return false;
    // Normalize: if the select returns the cmp's RHS on true, invert the
    // predicate so that it is expressed relative to (TrueVal, FalseVal).
    typename CmpInst_t::Predicate Pred =
        LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
    // Does "(x pred y) ? x : y" represent the desired max/min operation?
    if (!Pred_t::match(Pred))
      return false;
    // It does!  Bind the operands.
    return (L.match(LHS) && R.match(RHS)) ||
           (Commutable && R.match(LHS) && L.match(RHS));
  }
};
1198
1199/// Helper class for identifying signed max predicates.
1200struct smax_pred_ty {
1201 static bool match(ICmpInst::Predicate Pred) {
1202 return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
1203 }
1204};
1205
1206/// Helper class for identifying signed min predicates.
1207struct smin_pred_ty {
1208 static bool match(ICmpInst::Predicate Pred) {
1209 return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
1210 }
1211};
1212
1213/// Helper class for identifying unsigned max predicates.
1214struct umax_pred_ty {
1215 static bool match(ICmpInst::Predicate Pred) {
1216 return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
1217 }
1218};
1219
1220/// Helper class for identifying unsigned min predicates.
1221struct umin_pred_ty {
1222 static bool match(ICmpInst::Predicate Pred) {
1223 return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
1224 }
1225};
1226
1227/// Helper class for identifying ordered max predicates.
1228struct ofmax_pred_ty {
1229 static bool match(FCmpInst::Predicate Pred) {
1230 return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
1231 }
1232};
1233
1234/// Helper class for identifying ordered min predicates.
1235struct ofmin_pred_ty {
1236 static bool match(FCmpInst::Predicate Pred) {
1237 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
1238 }
1239};
1240
1241/// Helper class for identifying unordered max predicates.
1242struct ufmax_pred_ty {
1243 static bool match(FCmpInst::Predicate Pred) {
1244 return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
1245 }
1246};
1247
1248/// Helper class for identifying unordered min predicates.
1249struct ufmin_pred_ty {
1250 static bool match(FCmpInst::Predicate Pred) {
1251 return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
1252 }
1253};
1254
/// Matches a signed maximum idiom, e.g. "select (sgt x, y), x, y".
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
}

/// Matches a signed minimum idiom, e.g. "select (slt x, y), x, y".
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
}

/// Matches an unsigned maximum idiom, e.g. "select (ugt x, y), x, y".
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
}

/// Matches an unsigned minimum idiom, e.g. "select (ult x, y), x, y".
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
}
1278
/// Match an 'ordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ogt/oge, L, R), L, R) semantics matched by this predicate.
///
///                         max(L, R)  iff L and R are not NaN
///  m_OrdFMax(L, R) =      R          iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
}

/// Match an 'ordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(olt/ole, L, R), L, R) semantics matched by this predicate.
///
///                         min(L, R)  iff L and R are not NaN
///  m_OrdFMin(L, R) =      R          iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
}

/// Match an 'unordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ugt/uge, L, R), L, R) semantics matched by this predicate.
///
///                         max(L, R)  iff L and R are not NaN
///  m_UnordFMax(L, R) =    L          iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
m_UnordFMax(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
}

/// Match an 'unordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ult/ule, L, R), L, R) semantics matched by this predicate.
///
///                          min(L, R)  iff L and R are not NaN
///  m_UnordFMin(L, R) =     L          iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
m_UnordFMin(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
}
1338
1339//===----------------------------------------------------------------------===//
1340// Matchers for overflow check patterns: e.g. (a + b) u< a
1341//
1342
/// Matches the unsigned-add overflow check idioms "(a + b) u< a/b" and
/// "a/b u> (a + b)", binding the add's operands and the add itself.
template <typename LHS_t, typename RHS_t, typename Sum_t>
struct UAddWithOverflow_match {
  LHS_t L;
  RHS_t R;
  Sum_t S;

  UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
      : L(L), R(R), S(S) {}

  template <typename OpTy> bool match(OpTy *V) {
    Value *ICmpLHS, *ICmpRHS;
    ICmpInst::Predicate Pred;
    if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
      return false;

    Value *AddLHS, *AddRHS;
    auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));

    // (a + b) u< a, (a + b) u< b
    if (Pred == ICmpInst::ICMP_ULT)
      if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);

    // a >u (a + b), b >u (a + b)
    if (Pred == ICmpInst::ICMP_UGT)
      if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);

    return false;
  }
};

/// Match an icmp instruction checking for unsigned overflow on addition.
///
/// S is matched to the addition whose result is being checked for overflow, and
/// L and R are matched to the LHS and RHS of S.
template <typename LHS_t, typename RHS_t, typename Sum_t>
UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
  return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
}
1384
/// Matches a call whose OpI'th argument matches the sub-pattern Val.
template <typename Opnd_t> struct Argument_match {
  unsigned OpI; // Index of the argument to inspect.
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    // NOTE(review): only plain calls pass (CS.isCall()); invoke instructions
    // are rejected — confirm this is what callers expect.
    CallSite CS(V);
    return CS.isCall() && Val.match(CS.getArgument(OpI));
  }
};

/// Match an argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}
1402
1403/// Intrinsic matchers.
1404struct IntrinsicID_match {
1405 unsigned ID;
1406
1407 IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
1408
1409 template <typename OpTy> bool match(OpTy *V) {
1410 if (const auto *CI = dyn_cast<CallInst>(V))
1411 if (const auto *F = CI->getCalledFunction())
1412 return F->getIntrinsicID() == ID;
1413 return false;
1414 }
1415};
1416
/// Intrinsic matches are combinations of ID matchers, and argument
/// matchers. Higher arity matcher are defined recursively in terms of and-ing
/// them with lower arity matchers. Here's some convenient typedefs for up to
/// several arguments, and more can be added as needed
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void, typename T4 = void, typename T5 = void,
          typename T6 = void, typename T7 = void, typename T8 = void,
          typename T9 = void, typename T10 = void>
struct m_Intrinsic_Ty;
/// One-argument intrinsic matcher: ID match AND argument-0 match.
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
/// Two-argument matcher: recursively ANDs in the argument-1 match.
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
/// Three-argument matcher.
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                        Argument_match<T2>>;
};
/// Four-argument matcher.
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty<T0, T1, T2, T3> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                        Argument_match<T3>>;
};
1445
/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

/// Matches an intrinsic call with one matched argument.
template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

/// Matches an intrinsic call with two matched arguments.
template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

/// Matches an intrinsic call with three matched arguments.
template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

/// Matches an intrinsic call with four matched arguments.
template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}
1475
// Helper intrinsic matching specializations.
/// Matches a call to llvm.bitreverse.
template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::bitreverse>(Op0);
}

/// Matches a call to llvm.bswap.
template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::bswap>(Op0);
}

/// Matches a call to llvm.minnum.
template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
                                                        const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
}

/// Matches a call to llvm.maxnum.
template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
                                                        const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
}
1498
/// Matches the shift-based signum idiom (see m_Signum below for the
/// mathematical definition).
template <typename Opnd_t> struct Signum_match {
  Opnd_t Val;
  Signum_match(const Opnd_t &V) : Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    unsigned TypeSize = V->getType()->getScalarSizeInBits();
    if (TypeSize == 0)
      return false;

    unsigned ShiftWidth = TypeSize - 1;
    Value *OpL = nullptr, *OpR = nullptr;

    // This is the representation of signum we match:
    //
    //  signum(x) == (x >> 63) | (-x >>u 63)
    //
    // An i1 value is its own signum, so it's correct to match
    //
    //  signum(x) == (x >> 0)  | (-x >>u 0)
    //
    // for i1 values.

    auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
    auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
    auto Signum = m_Or(LHS, RHS);

    // Both shifts must apply to the same value; only then bind Val.
    return Signum.match(V) && OpL == OpR && Val.match(OpL);
  }
};

/// Matches a signum pattern.
///
/// signum(x) =
///      x >  0  ->  1
///      x == 0  ->  0
///      x <  0  -> -1
template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
  return Signum_match<Val_t>(V);
}
1538
1539//===----------------------------------------------------------------------===//
1540// Matchers for two-operands operators with the operators in either order
1541//
1542
/// Matches a BinaryOperator with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
  return AnyBinaryOp_match<LHS, RHS, true>(L, R);
}

/// Matches an ICmp with a predicate over LHS and RHS in either order.
/// Does not swap the predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
                                                                       R);
}

/// Matches an Add with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
}

/// Matches a Mul with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
}

/// Matches an And with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
}

/// Matches an Or with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
                                                              const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
}

/// Matches an Xor with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
}
1592
1593/// Matches an SMin with LHS and RHS in either order.
1594template <typename LHS, typename RHS>
1595inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
1596m_c_SMin(const LHS &L, const RHS &R) {
1597 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
1598}
1599/// Matches an SMax with LHS and RHS in either order.
1600template <typename LHS, typename RHS>
1601inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
1602m_c_SMax(const LHS &L, const RHS &R) {
1603 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
1604}
1605/// Matches a UMin with LHS and RHS in either order.
1606template <typename LHS, typename RHS>
1607inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
1608m_c_UMin(const LHS &L, const RHS &R) {
1609 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
1610}
1611/// Matches a UMax with LHS and RHS in either order.
1612template <typename LHS, typename RHS>
1613inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
1614m_c_UMax(const LHS &L, const RHS &R) {
1615 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
1616}
1617
1618/// Matches FAdd with LHS and RHS in either order.
1619template <typename LHS, typename RHS>
1620inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true>
1621m_c_FAdd(const LHS &L, const RHS &R) {
1622 return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R);
1623}
1624
1625/// Matches FMul with LHS and RHS in either order.
1626template <typename LHS, typename RHS>
1627inline BinaryOp_match<LHS, RHS, Instruction::FMul, true>
1628m_c_FMul(const LHS &L, const RHS &R) {
1629 return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R);
1630}
1631
1632} // end namespace PatternMatch
1633} // end namespace llvm
1634
1635#endif // LLVM_IR_PATTERNMATCH_H