Bug Summary

File: lib/Transforms/InstCombine/InstCombineShifts.cpp
Warning: line 205, column 25
Called C++ object pointer is uninitialized

Annotated Source Code

[?] Use j/k keys for keyboard navigation

/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp

1//===- InstCombineShifts.cpp ----------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the visitShl, visitLShr, and visitAShr functions.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombineInternal.h"
15#include "llvm/Analysis/ConstantFolding.h"
16#include "llvm/Analysis/InstructionSimplify.h"
17#include "llvm/IR/IntrinsicInst.h"
18#include "llvm/IR/PatternMatch.h"
19using namespace llvm;
20using namespace PatternMatch;
21
#define DEBUG_TYPE "instcombine"
23
24Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
25 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
26 assert(Op0->getType() == Op1->getType())(static_cast <bool> (Op0->getType() == Op1->getType
()) ? void (0) : __assert_fail ("Op0->getType() == Op1->getType()"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 26, __extension__ __PRETTY_FUNCTION__))
;
27
28 // See if we can fold away this shift.
29 if (SimplifyDemandedInstructionBits(I))
30 return &I;
31
32 // Try to fold constant and into select arguments.
33 if (isa<Constant>(Op0))
34 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
35 if (Instruction *R = FoldOpIntoSelect(I, SI))
36 return R;
37
38 if (Constant *CUI = dyn_cast<Constant>(Op1))
39 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
40 return Res;
41
42 // (C1 shift (A add C2)) -> (C1 shift C2) shift A)
43 // iff A and C2 are both positive.
44 Value *A;
45 Constant *C;
46 if (match(Op0, m_Constant()) && match(Op1, m_Add(m_Value(A), m_Constant(C))))
47 if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) &&
48 isKnownNonNegative(C, DL, 0, &AC, &I, &DT))
49 return BinaryOperator::Create(
50 I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A);
51
52 // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
53 // Because shifts by negative values (which could occur if A were negative)
54 // are undefined.
55 const APInt *B;
56 if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
57 // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
58 // demand the sign bit (and many others) here??
59 Value *Rem = Builder.CreateAnd(A, ConstantInt::get(I.getType(), *B - 1),
60 Op1->getName());
61 I.setOperand(1, Rem);
62 return &I;
63 }
64
65 return nullptr;
66}
67
68/// Return true if we can simplify two logical (either left or right) shifts
69/// that have constant shift amounts: OuterShift (InnerShift X, C1), C2.
70static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
71 Instruction *InnerShift, InstCombiner &IC,
72 Instruction *CxtI) {
73 assert(InnerShift->isLogicalShift() && "Unexpected instruction type")(static_cast <bool> (InnerShift->isLogicalShift() &&
"Unexpected instruction type") ? void (0) : __assert_fail ("InnerShift->isLogicalShift() && \"Unexpected instruction type\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 73, __extension__ __PRETTY_FUNCTION__))
;
74
75 // We need constant scalar or constant splat shifts.
76 const APInt *InnerShiftConst;
77 if (!match(InnerShift->getOperand(1), m_APInt(InnerShiftConst)))
78 return false;
79
80 // Two logical shifts in the same direction:
81 // shl (shl X, C1), C2 --> shl X, C1 + C2
82 // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
83 bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
84 if (IsInnerShl == IsOuterShl)
85 return true;
86
87 // Equal shift amounts in opposite directions become bitwise 'and':
88 // lshr (shl X, C), C --> and X, C'
89 // shl (lshr X, C), C --> and X, C'
90 unsigned InnerShAmt = InnerShiftConst->getZExtValue();
91 if (InnerShAmt == OuterShAmt)
92 return true;
93
94 // If the 2nd shift is bigger than the 1st, we can fold:
95 // lshr (shl X, C1), C2 --> and (shl X, C1 - C2), C3
96 // shl (lshr X, C1), C2 --> and (lshr X, C1 - C2), C3
97 // but it isn't profitable unless we know the and'd out bits are already zero.
98 // Also, check that the inner shift is valid (less than the type width) or
99 // we'll crash trying to produce the bit mask for the 'and'.
100 unsigned TypeWidth = InnerShift->getType()->getScalarSizeInBits();
101 if (InnerShAmt > OuterShAmt && InnerShAmt < TypeWidth) {
102 unsigned MaskShift =
103 IsInnerShl ? TypeWidth - InnerShAmt : InnerShAmt - OuterShAmt;
104 APInt Mask = APInt::getLowBitsSet(TypeWidth, OuterShAmt) << MaskShift;
105 if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))
106 return true;
107 }
108
109 return false;
110}
111
112/// See if we can compute the specified value, but shifted logically to the left
113/// or right by some number of bits. This should return true if the expression
114/// can be computed for the same cost as the current expression tree. This is
115/// used to eliminate extraneous shifting from things like:
116/// %C = shl i128 %A, 64
117/// %D = shl i128 %B, 96
118/// %E = or i128 %C, %D
119/// %F = lshr i128 %E, 64
120/// where the client will ask if E can be computed shifted right by 64-bits. If
121/// this succeeds, getShiftedValue() will be called to produce the value.
122static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
123 InstCombiner &IC, Instruction *CxtI) {
124 // We can always evaluate constants shifted.
125 if (isa<Constant>(V))
126 return true;
127
128 Instruction *I = dyn_cast<Instruction>(V);
129 if (!I) return false;
130
131 // If this is the opposite shift, we can directly reuse the input of the shift
132 // if the needed bits are already zero in the input. This allows us to reuse
133 // the value which means that we don't care if the shift has multiple uses.
134 // TODO: Handle opposite shift by exact value.
135 ConstantInt *CI = nullptr;
136 if ((IsLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
137 (!IsLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
138 if (CI->getZExtValue() == NumBits) {
139 // TODO: Check that the input bits are already zero with MaskedValueIsZero
140#if 0
141 // If this is a truncate of a logical shr, we can truncate it to a smaller
142 // lshr iff we know that the bits we would otherwise be shifting in are
143 // already zeros.
144 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
145 uint32_t BitWidth = Ty->getScalarSizeInBits();
146 if (MaskedValueIsZero(I->getOperand(0),
147 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
148 CI->getLimitedValue(BitWidth) < BitWidth) {
149 return CanEvaluateTruncated(I->getOperand(0), Ty);
150 }
151#endif
152
153 }
154 }
155
156 // We can't mutate something that has multiple uses: doing so would
157 // require duplicating the instruction in general, which isn't profitable.
158 if (!I->hasOneUse()) return false;
159
160 switch (I->getOpcode()) {
161 default: return false;
162 case Instruction::And:
163 case Instruction::Or:
164 case Instruction::Xor:
165 // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
166 return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
167 canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
168
169 case Instruction::Shl:
170 case Instruction::LShr:
171 return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
172
173 case Instruction::Select: {
174 SelectInst *SI = cast<SelectInst>(I);
175 Value *TrueVal = SI->getTrueValue();
176 Value *FalseVal = SI->getFalseValue();
177 return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
178 canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
179 }
180 case Instruction::PHI: {
181 // We can change a phi if we can change all operands. Note that we never
182 // get into trouble with cyclic PHIs here because we only consider
183 // instructions with a single use.
184 PHINode *PN = cast<PHINode>(I);
185 for (Value *IncValue : PN->incoming_values())
186 if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
187 return false;
188 return true;
189 }
190 }
191}
192
193/// Fold OuterShift (InnerShift X, C1), C2.
194/// See canEvaluateShiftedShift() for the constraints on these instructions.
195static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
196 bool IsOuterShl,
197 InstCombiner::BuilderTy &Builder) {
198 bool IsInnerShl = InnerShift->getOpcode() == Instruction::Shl;
15
Assuming the condition is false
199 Type *ShType = InnerShift->getType();
200 unsigned TypeWidth = ShType->getScalarSizeInBits();
201
202 // We only accept shifts-by-a-constant in canEvaluateShifted().
203 const APInt *C1;
16
'C1' declared without an initial value
204 match(InnerShift->getOperand(1), m_APInt(C1));
17
Calling 'm_APInt'
20
Returning from 'm_APInt'
205 unsigned InnerShAmt = C1->getZExtValue();
21
Called C++ object pointer is uninitialized
206
207 // Change the shift amount and clear the appropriate IR flags.
208 auto NewInnerShift = [&](unsigned ShAmt) {
209 InnerShift->setOperand(1, ConstantInt::get(ShType, ShAmt));
210 if (IsInnerShl) {
211 InnerShift->setHasNoUnsignedWrap(false);
212 InnerShift->setHasNoSignedWrap(false);
213 } else {
214 InnerShift->setIsExact(false);
215 }
216 return InnerShift;
217 };
218
219 // Two logical shifts in the same direction:
220 // shl (shl X, C1), C2 --> shl X, C1 + C2
221 // lshr (lshr X, C1), C2 --> lshr X, C1 + C2
222 if (IsInnerShl == IsOuterShl) {
223 // If this is an oversized composite shift, then unsigned shifts get 0.
224 if (InnerShAmt + OuterShAmt >= TypeWidth)
225 return Constant::getNullValue(ShType);
226
227 return NewInnerShift(InnerShAmt + OuterShAmt);
228 }
229
230 // Equal shift amounts in opposite directions become bitwise 'and':
231 // lshr (shl X, C), C --> and X, C'
232 // shl (lshr X, C), C --> and X, C'
233 if (InnerShAmt == OuterShAmt) {
234 APInt Mask = IsInnerShl
235 ? APInt::getLowBitsSet(TypeWidth, TypeWidth - OuterShAmt)
236 : APInt::getHighBitsSet(TypeWidth, TypeWidth - OuterShAmt);
237 Value *And = Builder.CreateAnd(InnerShift->getOperand(0),
238 ConstantInt::get(ShType, Mask));
239 if (auto *AndI = dyn_cast<Instruction>(And)) {
240 AndI->moveBefore(InnerShift);
241 AndI->takeName(InnerShift);
242 }
243 return And;
244 }
245
246 assert(InnerShAmt > OuterShAmt &&(static_cast <bool> (InnerShAmt > OuterShAmt &&
"Unexpected opposite direction logical shift pair") ? void (
0) : __assert_fail ("InnerShAmt > OuterShAmt && \"Unexpected opposite direction logical shift pair\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 247, __extension__ __PRETTY_FUNCTION__))
247 "Unexpected opposite direction logical shift pair")(static_cast <bool> (InnerShAmt > OuterShAmt &&
"Unexpected opposite direction logical shift pair") ? void (
0) : __assert_fail ("InnerShAmt > OuterShAmt && \"Unexpected opposite direction logical shift pair\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 247, __extension__ __PRETTY_FUNCTION__))
;
248
249 // In general, we would need an 'and' for this transform, but
250 // canEvaluateShiftedShift() guarantees that the masked-off bits are not used.
251 // lshr (shl X, C1), C2 --> shl X, C1 - C2
252 // shl (lshr X, C1), C2 --> lshr X, C1 - C2
253 return NewInnerShift(InnerShAmt - OuterShAmt);
254}
255
256/// When canEvaluateShifted() returns true for an expression, this function
257/// inserts the new computation that produces the shifted value.
258static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
259 InstCombiner &IC, const DataLayout &DL) {
260 // We can always evaluate constants shifted.
261 if (Constant *C = dyn_cast<Constant>(V)) {
1
Taking false branch
12
Taking false branch
262 if (isLeftShift)
263 V = IC.Builder.CreateShl(C, NumBits);
264 else
265 V = IC.Builder.CreateLShr(C, NumBits);
266 // If we got a constantexpr back, try to simplify it with TD info.
267 if (auto *C = dyn_cast<Constant>(V))
268 if (auto *FoldedC =
269 ConstantFoldConstant(C, DL, &IC.getTargetLibraryInfo()))
270 V = FoldedC;
271 return V;
272 }
273
274 Instruction *I = cast<Instruction>(V);
275 IC.Worklist.Add(I);
276
277 switch (I->getOpcode()) {
2
Control jumps to 'case PHI:' at line 300
13
Control jumps to 'case Shl:' at line 289
278 default: llvm_unreachable("Inconsistency with CanEvaluateShifted")::llvm::llvm_unreachable_internal("Inconsistency with CanEvaluateShifted"
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 278)
;
279 case Instruction::And:
280 case Instruction::Or:
281 case Instruction::Xor:
282 // Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
283 I->setOperand(
284 0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
285 I->setOperand(
286 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
287 return I;
288
289 case Instruction::Shl:
290 case Instruction::LShr:
291 return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
14
Calling 'foldShiftedShift'
292 IC.Builder);
293
294 case Instruction::Select:
295 I->setOperand(
296 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
297 I->setOperand(
298 2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
299 return I;
300 case Instruction::PHI: {
301 // We can change a phi if we can change all operands. Note that we never
302 // get into trouble with cyclic PHIs here because we only consider
303 // instructions with a single use.
304 PHINode *PN = cast<PHINode>(I);
305 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
3
Assuming 'i' is not equal to 'e'
4
Loop condition is true. Entering loop body
5
Assuming 'i' is not equal to 'e'
6
Loop condition is true. Entering loop body
7
Assuming 'i' is not equal to 'e'
8
Loop condition is true. Entering loop body
9
Assuming 'i' is not equal to 'e'
10
Loop condition is true. Entering loop body
306 PN->setIncomingValue(i, getShiftedValue(PN->getIncomingValue(i), NumBits,
11
Calling 'getShiftedValue'
307 isLeftShift, IC, DL));
308 return PN;
309 }
310 }
311}
312
313// If this is a bitwise operator or add with a constant RHS we might be able
314// to pull it through a shift.
315static bool canShiftBinOpWithConstantRHS(BinaryOperator &Shift,
316 BinaryOperator *BO,
317 const APInt &C) {
318 bool IsValid = true; // Valid only for And, Or Xor,
319 bool HighBitSet = false; // Transform ifhigh bit of constant set?
320
321 switch (BO->getOpcode()) {
322 default: IsValid = false; break; // Do not perform transform!
323 case Instruction::Add:
324 IsValid = Shift.getOpcode() == Instruction::Shl;
325 break;
326 case Instruction::Or:
327 case Instruction::Xor:
328 HighBitSet = false;
329 break;
330 case Instruction::And:
331 HighBitSet = true;
332 break;
333 }
334
335 // If this is a signed shift right, and the high bit is modified
336 // by the logical operation, do not perform the transformation.
337 // The HighBitSet boolean indicates the value of the high bit of
338 // the constant which would cause it to be modified for this
339 // operation.
340 //
341 if (IsValid && Shift.getOpcode() == Instruction::AShr)
342 IsValid = C.isNegative() == HighBitSet;
343
344 return IsValid;
345}
346
347Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
348 BinaryOperator &I) {
349 bool isLeftShift = I.getOpcode() == Instruction::Shl;
350
351 const APInt *Op1C;
352 if (!match(Op1, m_APInt(Op1C)))
353 return nullptr;
354
355 // See if we can propagate this shift into the input, this covers the trivial
356 // cast of lshr(shl(x,c1),c2) as well as other more complex cases.
357 if (I.getOpcode() != Instruction::AShr &&
358 canEvaluateShifted(Op0, Op1C->getZExtValue(), isLeftShift, *this, &I)) {
359 DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("instcombine")) { dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: "
<< I <<"\n"; } } while (false)
360 " to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("instcombine")) { dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: "
<< I <<"\n"; } } while (false)
;
361
362 return replaceInstUsesWith(
363 I, getShiftedValue(Op0, Op1C->getZExtValue(), isLeftShift, *this, DL));
364 }
365
366 // See if we can simplify any instructions used by the instruction whose sole
367 // purpose is to compute bits we don't care about.
368 unsigned TypeBits = Op0->getType()->getScalarSizeInBits();
369
370 assert(!Op1C->uge(TypeBits) &&(static_cast <bool> (!Op1C->uge(TypeBits) &&
"Shift over the type width should have been removed already"
) ? void (0) : __assert_fail ("!Op1C->uge(TypeBits) && \"Shift over the type width should have been removed already\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 371, __extension__ __PRETTY_FUNCTION__))
371 "Shift over the type width should have been removed already")(static_cast <bool> (!Op1C->uge(TypeBits) &&
"Shift over the type width should have been removed already"
) ? void (0) : __assert_fail ("!Op1C->uge(TypeBits) && \"Shift over the type width should have been removed already\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 371, __extension__ __PRETTY_FUNCTION__))
;
372
373 if (Instruction *FoldedShift = foldOpWithConstantIntoOperand(I))
374 return FoldedShift;
375
376 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
377 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
378 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
379 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
380 // place. Don't try to do this transformation in this case. Also, we
381 // require that the input operand is a shift-by-constant so that we have
382 // confidence that the shifts will get folded together. We could do this
383 // xform in more cases, but it is unlikely to be profitable.
384 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
385 isa<ConstantInt>(TrOp->getOperand(1))) {
386 // Okay, we'll do this xform. Make the shift of shift.
387 Constant *ShAmt =
388 ConstantExpr::getZExt(cast<Constant>(Op1), TrOp->getType());
389 // (shift2 (shift1 & 0x00FF), c2)
390 Value *NSh = Builder.CreateBinOp(I.getOpcode(), TrOp, ShAmt, I.getName());
391
392 // For logical shifts, the truncation has the effect of making the high
393 // part of the register be zeros. Emulate this by inserting an AND to
394 // clear the top bits as needed. This 'and' will usually be zapped by
395 // other xforms later if dead.
396 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
397 unsigned DstSize = TI->getType()->getScalarSizeInBits();
398 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
399
400 // The mask we constructed says what the trunc would do if occurring
401 // between the shifts. We want to know the effect *after* the second
402 // shift. We know that it is a logical shift by a constant, so adjust the
403 // mask as appropriate.
404 if (I.getOpcode() == Instruction::Shl)
405 MaskV <<= Op1C->getZExtValue();
406 else {
407 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift")(static_cast <bool> (I.getOpcode() == Instruction::LShr
&& "Unknown logical shift") ? void (0) : __assert_fail
("I.getOpcode() == Instruction::LShr && \"Unknown logical shift\""
, "/build/llvm-toolchain-snapshot-6.0~svn321639/lib/Transforms/InstCombine/InstCombineShifts.cpp"
, 407, __extension__ __PRETTY_FUNCTION__))
;
408 MaskV.lshrInPlace(Op1C->getZExtValue());
409 }
410
411 // shift1 & 0x00FF
412 Value *And = Builder.CreateAnd(NSh,
413 ConstantInt::get(I.getContext(), MaskV),
414 TI->getName());
415
416 // Return the value truncated to the interesting size.
417 return new TruncInst(And, I.getType());
418 }
419 }
420
421 if (Op0->hasOneUse()) {
422 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
423 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
424 Value *V1, *V2;
425 ConstantInt *CC;
426 switch (Op0BO->getOpcode()) {
427 default: break;
428 case Instruction::Add:
429 case Instruction::And:
430 case Instruction::Or:
431 case Instruction::Xor: {
432 // These operators commute.
433 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
434 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
435 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
436 m_Specific(Op1)))) {
437 Value *YS = // (Y << C)
438 Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
439 // (X + (Y << C))
440 Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), YS, V1,
441 Op0BO->getOperand(1)->getName());
442 unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
443
444 APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
445 Constant *Mask = ConstantInt::get(I.getContext(), Bits);
446 if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
447 Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
448 return BinaryOperator::CreateAnd(X, Mask);
449 }
450
451 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
452 Value *Op0BOOp1 = Op0BO->getOperand(1);
453 if (isLeftShift && Op0BOOp1->hasOneUse() &&
454 match(Op0BOOp1,
455 m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
456 m_ConstantInt(CC)))) {
457 Value *YS = // (Y << C)
458 Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
459 // X & (CC << C)
460 Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
461 V1->getName()+".mask");
462 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
463 }
464 LLVM_FALLTHROUGH[[clang::fallthrough]];
465 }
466
467 case Instruction::Sub: {
468 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
469 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
470 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
471 m_Specific(Op1)))) {
472 Value *YS = // (Y << C)
473 Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
474 // (X + (Y << C))
475 Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), V1, YS,
476 Op0BO->getOperand(0)->getName());
477 unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
478
479 APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
480 Constant *Mask = ConstantInt::get(I.getContext(), Bits);
481 if (VectorType *VT = dyn_cast<VectorType>(X->getType()))
482 Mask = ConstantVector::getSplat(VT->getNumElements(), Mask);
483 return BinaryOperator::CreateAnd(X, Mask);
484 }
485
486 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
487 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
488 match(Op0BO->getOperand(0),
489 m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))),
490 m_ConstantInt(CC))) && V2 == Op1) {
491 Value *YS = // (Y << C)
492 Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
493 // X & (CC << C)
494 Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
495 V1->getName()+".mask");
496
497 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
498 }
499
500 break;
501 }
502 }
503
504
505 // If the operand is a bitwise operator with a constant RHS, and the
506 // shift is the only use, we can pull it out of the shift.
507 const APInt *Op0C;
508 if (match(Op0BO->getOperand(1), m_APInt(Op0C))) {
509 if (canShiftBinOpWithConstantRHS(I, Op0BO, *Op0C)) {
510 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
511 cast<Constant>(Op0BO->getOperand(1)), Op1);
512
513 Value *NewShift =
514 Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
515 NewShift->takeName(Op0BO);
516
517 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
518 NewRHS);
519 }
520 }
521
522 // If the operand is a subtract with a constant LHS, and the shift
523 // is the only use, we can pull it out of the shift.
524 // This folds (shl (sub C1, X), C2) -> (sub (C1 << C2), (shl X, C2))
525 if (isLeftShift && Op0BO->getOpcode() == Instruction::Sub &&
526 match(Op0BO->getOperand(0), m_APInt(Op0C))) {
527 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
528 cast<Constant>(Op0BO->getOperand(0)), Op1);
529
530 Value *NewShift = Builder.CreateShl(Op0BO->getOperand(1), Op1);
531 NewShift->takeName(Op0BO);
532
533 return BinaryOperator::CreateSub(NewRHS, NewShift);
534 }
535 }
536
537 // If we have a select that conditionally executes some binary operator,
538 // see if we can pull it the select and operator through the shift.
539 //
540 // For example, turning:
541 // shl (select C, (add X, C1), X), C2
542 // Into:
543 // Y = shl X, C2
544 // select C, (add Y, C1 << C2), Y
545 Value *Cond;
546 BinaryOperator *TBO;
547 Value *FalseVal;
548 if (match(Op0, m_Select(m_Value(Cond), m_OneUse(m_BinOp(TBO)),
549 m_Value(FalseVal)))) {
550 const APInt *C;
551 if (!isa<Constant>(FalseVal) && TBO->getOperand(0) == FalseVal &&
552 match(TBO->getOperand(1), m_APInt(C)) &&
553 canShiftBinOpWithConstantRHS(I, TBO, *C)) {
554 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
555 cast<Constant>(TBO->getOperand(1)), Op1);
556
557 Value *NewShift =
558 Builder.CreateBinOp(I.getOpcode(), FalseVal, Op1);
559 Value *NewOp = Builder.CreateBinOp(TBO->getOpcode(), NewShift,
560 NewRHS);
561 return SelectInst::Create(Cond, NewOp, NewShift);
562 }
563 }
564
565 BinaryOperator *FBO;
566 Value *TrueVal;
567 if (match(Op0, m_Select(m_Value(Cond), m_Value(TrueVal),
568 m_OneUse(m_BinOp(FBO))))) {
569 const APInt *C;
570 if (!isa<Constant>(TrueVal) && FBO->getOperand(0) == TrueVal &&
571 match(FBO->getOperand(1), m_APInt(C)) &&
572 canShiftBinOpWithConstantRHS(I, FBO, *C)) {
573 Constant *NewRHS = ConstantExpr::get(I.getOpcode(),
574 cast<Constant>(FBO->getOperand(1)), Op1);
575
576 Value *NewShift =
577 Builder.CreateBinOp(I.getOpcode(), TrueVal, Op1);
578 Value *NewOp = Builder.CreateBinOp(FBO->getOpcode(), NewShift,
579 NewRHS);
580 return SelectInst::Create(Cond, NewShift, NewOp);
581 }
582 }
583 }
584
585 return nullptr;
586}
587
588Instruction *InstCombiner::visitShl(BinaryOperator &I) {
589 if (Value *V = SimplifyVectorOp(I))
590 return replaceInstUsesWith(I, V);
591
592 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
593 if (Value *V =
594 SimplifyShlInst(Op0, Op1, I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
595 SQ.getWithInstruction(&I)))
596 return replaceInstUsesWith(I, V);
597
598 if (Instruction *V = commonShiftTransforms(I))
599 return V;
600
601 const APInt *ShAmtAPInt;
602 if (match(Op1, m_APInt(ShAmtAPInt))) {
603 unsigned ShAmt = ShAmtAPInt->getZExtValue();
604 unsigned BitWidth = I.getType()->getScalarSizeInBits();
605 Type *Ty = I.getType();
606
607 // shl (zext X), ShAmt --> zext (shl X, ShAmt)
608 // This is only valid if X would have zeros shifted out.
609 Value *X;
610 if (match(Op0, m_ZExt(m_Value(X)))) {
611 unsigned SrcWidth = X->getType()->getScalarSizeInBits();
612 if (ShAmt < SrcWidth &&
613 MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I))
614 return new ZExtInst(Builder.CreateShl(X, ShAmt), Ty);
615 }
616
617 // (X >> C) << C --> X & (-1 << C)
618 if (match(Op0, m_Shr(m_Value(X), m_Specific(Op1)))) {
619 APInt Mask(APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt));
620 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
621 }
622
623 // Be careful about hiding shl instructions behind bit masks. They are used
624 // to represent multiplies by a constant, and it is important that simple
625 // arithmetic expressions are still recognizable by scalar evolution.
626 // The inexact versions are deferred to DAGCombine, so we don't hide shl
627 // behind a bit mask.
628 const APInt *ShOp1;
629 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_APInt(ShOp1))))) {
630 unsigned ShrAmt = ShOp1->getZExtValue();
631 if (ShrAmt < ShAmt) {
632 // If C1 < C2: (X >>?,exact C1) << C2 --> X << (C2 - C1)
633 Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShrAmt);
634 auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
635 NewShl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
636 NewShl->setHasNoSignedWrap(I.hasNoSignedWrap());
637 return NewShl;
638 }
639 if (ShrAmt > ShAmt) {
640 // If C1 > C2: (X >>?exact C1) << C2 --> X >>?exact (C1 - C2)
641 Constant *ShiftDiff = ConstantInt::get(Ty, ShrAmt - ShAmt);
642 auto *NewShr = BinaryOperator::Create(
643 cast<BinaryOperator>(Op0)->getOpcode(), X, ShiftDiff);
644 NewShr->setIsExact(true);
645 return NewShr;
646 }
647 }
648
649 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
650 unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
651 // Oversized shifts are simplified to zero in InstSimplify.
652 if (AmtSum < BitWidth)
653 // (X << C1) << C2 --> X << (C1 + C2)
654 return BinaryOperator::CreateShl(X, ConstantInt::get(Ty, AmtSum));
655 }
656
657 // If the shifted-out value is known-zero, then this is a NUW shift.
658 if (!I.hasNoUnsignedWrap() &&
659 MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmt), 0, &I)) {
660 I.setHasNoUnsignedWrap();
661 return &I;
662 }
663
664 // If the shifted-out value is all signbits, then this is a NSW shift.
665 if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmt) {
666 I.setHasNoSignedWrap();
667 return &I;
668 }
669 }
670
671 Constant *C1;
672 if (match(Op1, m_Constant(C1))) {
673 Constant *C2;
674 Value *X;
675 // (C2 << X) << C1 --> (C2 << C1) << X
676 if (match(Op0, m_OneUse(m_Shl(m_Constant(C2), m_Value(X)))))
677 return BinaryOperator::CreateShl(ConstantExpr::getShl(C2, C1), X);
678
679 // (X * C2) << C1 --> X * (C2 << C1)
680 if (match(Op0, m_Mul(m_Value(X), m_Constant(C2))))
681 return BinaryOperator::CreateMul(X, ConstantExpr::getShl(C2, C1));
682 }
683
684 return nullptr;
685}
686
/// Combine patterns rooted at a logical right shift (lshr).
///
/// Tries, in order: vector-op simplification, generic InstSimplify for lshr,
/// shift transforms shared with shl/ashr, and then a set of lshr-specific
/// folds that fire only when the shift amount is a constant (scalar or
/// splat vector). Returns a replacement instruction, the modified
/// instruction itself (when only flags changed), or nullptr if no fold
/// applied.
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V =
          SimplifyLShrInst(Op0, Op1, I.isExact(), SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = commonShiftTransforms(I))
    return R;

  Type *Ty = I.getType();
  const APInt *ShAmtAPInt;
  // All folds below require a constant (or splat-constant) shift amount.
  if (match(Op1, m_APInt(ShAmtAPInt))) {
    unsigned ShAmt = ShAmtAPInt->getZExtValue();
    unsigned BitWidth = Ty->getScalarSizeInBits();
    auto *II = dyn_cast<IntrinsicInst>(Op0);
    // Shifting a bit-count intrinsic result down by log2(BitWidth) isolates
    // its only possible set high bit, so the whole expression collapses to
    // a compare + zext.
    if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt &&
        (II->getIntrinsicID() == Intrinsic::ctlz ||
         II->getIntrinsicID() == Intrinsic::cttz ||
         II->getIntrinsicID() == Intrinsic::ctpop)) {
      // ctlz.i32(x)>>5  --> zext(x == 0)
      // cttz.i32(x)>>5  --> zext(x == 0)
      // ctpop.i32(x)>>5 --> zext(x == -1)
      bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
      Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
      Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
      return new ZExtInst(Cmp, Ty);
    }

    Value *X;
    const APInt *ShOp1;
    // Fold (X << C1) >>u C2 by combining the two shift amounts.
    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShOp1)))) {
      unsigned ShlAmt = ShOp1->getZExtValue();
      if (ShlAmt < ShAmt) {
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
        if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C2 --> X >>u (C2 - C1)
          auto *NewLShr = BinaryOperator::CreateLShr(X, ShiftDiff);
          NewLShr->setIsExact(I.isExact());
          return NewLShr;
        }
        // Without nuw, the shl may have discarded high bits, so mask them
        // back off after the combined right shift.
        // (X << C1) >>u C2 --> (X >>u (C2 - C1)) & (-1 >> C2)
        Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
        APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
        return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
      }
      if (ShlAmt > ShAmt) {
        Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
        if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
          // (X <<nuw C1) >>u C2 --> X <<nuw (C1 - C2)
          auto *NewShl = BinaryOperator::CreateShl(X, ShiftDiff);
          NewShl->setHasNoUnsignedWrap(true);
          return NewShl;
        }
        // (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
        Value *NewShl = Builder.CreateShl(X, ShiftDiff);
        APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
        return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
      }
      assert(ShlAmt == ShAmt);
      // (X << C) >>u C --> X & (-1 >>u C)
      APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
      return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
    }

    // Narrow the shift into the zext'd source type when the type change is
    // considered profitable.
    if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) &&
        (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
      assert(ShAmt < X->getType()->getScalarSizeInBits() &&
             "Big shift not simplified to zero?");
      // lshr (zext iM X to iN), C --> zext (lshr X, C) to iN
      Value *NewLShr = Builder.CreateLShr(X, ShAmt);
      return new ZExtInst(NewLShr, Ty);
    }

    if (match(Op0, m_SExt(m_Value(X))) &&
        (!Ty->isIntegerTy() || shouldChangeType(Ty, X->getType()))) {
      // Are we moving the sign bit to the low bit and widening with high zeros?
      unsigned SrcTyBitWidth = X->getType()->getScalarSizeInBits();
      if (ShAmt == BitWidth - 1) {
        // lshr (sext i1 X to iN), N-1 --> zext X to iN
        if (SrcTyBitWidth == 1)
          return new ZExtInst(X, Ty);

        // lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
        if (Op0->hasOneUse()) {
          Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
          return new ZExtInst(NewLShr, Ty);
        }
      }

      // lshr (sext iM X to iN), N-M --> zext (ashr X, min(N-M, M-1)) to iN
      if (ShAmt == BitWidth - SrcTyBitWidth && Op0->hasOneUse()) {
        // The new shift amount can't be more than the narrow source type.
        unsigned NewShAmt = std::min(ShAmt, SrcTyBitWidth - 1);
        Value *AShr = Builder.CreateAShr(X, NewShAmt);
        return new ZExtInst(AShr, Ty);
      }
    }

    if (match(Op0, m_LShr(m_Value(X), m_APInt(ShOp1)))) {
      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
      // Oversized shifts are simplified to zero in InstSimplify.
      if (AmtSum < BitWidth)
        // (X >>u C1) >>u C2 --> X >>u (C1 + C2)
        return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
    }

    // If the shifted-out value is known-zero, then this is an exact shift.
    if (!I.isExact() &&
        MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
      I.setIsExact();
      return &I;
    }
  }
  return nullptr;
}
805
/// Combine patterns rooted at an arithmetic right shift (ashr).
///
/// Runs vector-op simplification, generic InstSimplify for ashr, the shift
/// transforms shared with shl/lshr, then ashr-specific folds that require a
/// constant shift amount, and finally an ashr -> lshr strength reduction
/// when the sign bit is known zero. Returns a replacement instruction, the
/// modified instruction itself, or nullptr if nothing applied.
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V =
          SimplifyAShrInst(Op0, Op1, I.isExact(), SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = commonShiftTransforms(I))
    return R;

  Type *Ty = I.getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  const APInt *ShAmtAPInt;
  // The folds below require a constant (or splat-constant) shift amount.
  if (match(Op1, m_APInt(ShAmtAPInt))) {
    unsigned ShAmt = ShAmtAPInt->getZExtValue();

    // If the shift amount equals the difference in width of the destination
    // and source scalar types:
    // ashr (shl (zext X), C), C --> sext X
    Value *X;
    if (match(Op0, m_Shl(m_ZExt(m_Value(X)), m_Specific(Op1))) &&
        ShAmt == BitWidth - X->getType()->getScalarSizeInBits())
      return new SExtInst(X, Ty);

    // We can't handle (X << C1) >>s C2. It shifts arbitrary bits in. However,
    // we can handle (X <<nsw C1) >>s C2 since it only shifts in sign bits.
    const APInt *ShOp1;
    if (match(Op0, m_NSWShl(m_Value(X), m_APInt(ShOp1)))) {
      unsigned ShlAmt = ShOp1->getZExtValue();
      if (ShlAmt < ShAmt) {
        // (X <<nsw C1) >>s C2 --> X >>s (C2 - C1)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShAmt - ShlAmt);
        auto *NewAShr = BinaryOperator::CreateAShr(X, ShiftDiff);
        NewAShr->setIsExact(I.isExact());
        return NewAShr;
      }
      if (ShlAmt > ShAmt) {
        // (X <<nsw C1) >>s C2 --> X <<nsw (C1 - C2)
        Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmt - ShAmt);
        auto *NewShl = BinaryOperator::Create(Instruction::Shl, X, ShiftDiff);
        NewShl->setHasNoSignedWrap(true);
        return NewShl;
      }
    }

    if (match(Op0, m_AShr(m_Value(X), m_APInt(ShOp1)))) {
      unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
      // Oversized arithmetic shifts replicate the sign bit.
      AmtSum = std::min(AmtSum, BitWidth - 1);
      // (X >>s C1) >>s C2 --> X >>s (C1 + C2)
      return BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum));
    }

    // Narrow the shift into the sext'd source type when profitable; the
    // shift amount is clamped so it stays in range for the narrow type.
    if (match(Op0, m_OneUse(m_SExt(m_Value(X)))) &&
        (Ty->isVectorTy() || shouldChangeType(Ty, X->getType()))) {
      // ashr (sext X), C --> sext (ashr X, C')
      Type *SrcTy = X->getType();
      ShAmt = std::min(ShAmt, SrcTy->getScalarSizeInBits() - 1);
      Value *NewSh = Builder.CreateAShr(X, ConstantInt::get(SrcTy, ShAmt));
      return new SExtInst(NewSh, Ty);
    }

    // If the shifted-out value is known-zero, then this is an exact shift.
    if (!I.isExact() &&
        MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
      I.setIsExact();
      return &I;
    }
  }

  // See if we can turn a signed shr into an unsigned shr.
  if (MaskedValueIsZero(Op0, APInt::getSignMask(BitWidth), 0, &I))
    return BinaryOperator::CreateLShr(Op0, Op1);

  return nullptr;
}

/build/llvm-toolchain-snapshot-6.0~svn321639/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file provides a simple and efficient mechanism for performing general
11// tree-based pattern matches on the LLVM IR. The power of these routines is
12// that it allows you to write concise patterns that are expressive and easy to
13// understand. The other major advantage of this is that it allows you to
14// trivially capture/bind elements in the pattern to variables. For example,
15// you can do something like this:
16//
17// Value *Exp = ...
18// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
19// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
20// m_And(m_Value(Y), m_ConstantInt(C2))))) {
21// ... Pattern is matched and variables are bound ...
22// }
23//
24// This is primarily useful to things like the instruction combiner, but can
25// also be useful for static analysis tools or code generators.
26//
27//===----------------------------------------------------------------------===//
28
29#ifndef LLVM_IR_PATTERNMATCH_H
30#define LLVM_IR_PATTERNMATCH_H
31
32#include "llvm/ADT/APFloat.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/IR/CallSite.h"
35#include "llvm/IR/Constant.h"
36#include "llvm/IR/Constants.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Instruction.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include <cstdint>
45
46namespace llvm {
47namespace PatternMatch {
48
/// Entry point of the pattern-matching DSL: apply pattern \p P to value \p V.
///
/// Matchers carry mutable state (e.g. bound capture slots), but are passed
/// around by const reference for convenience, so the constness is cast away
/// before dispatching to the pattern's own match() member.
template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  auto &MutableP = const_cast<Pattern &>(P);
  return MutableP.match(V);
}
52
/// \brief Matcher wrapper that additionally requires the matched value to
/// have exactly one use.
template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  // Check the use count first so the (possibly capturing) sub-pattern only
  // binds when the one-use constraint holds.
  template <typename OpTy> bool match(OpTy *V) {
    return V->hasOneUse() && SubPattern.match(V);
  }
};

/// \brief Match SubPattern, but only if the value has a single use.
template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
  return SubPattern;
}
66
/// \brief Matcher that succeeds iff the value is an instance of \p Class;
/// it captures nothing.
template <typename Class> struct class_match {
  template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
};

/// \brief Match an arbitrary value and ignore it.
inline class_match<Value> m_Value() { return class_match<Value>(); }

/// \brief Match an arbitrary binary operation and ignore it.
inline class_match<BinaryOperator> m_BinOp() {
  return class_match<BinaryOperator>();
}

/// \brief Matches any compare instruction and ignore it.
inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }

/// \brief Match an arbitrary ConstantInt and ignore it.
inline class_match<ConstantInt> m_ConstantInt() {
  return class_match<ConstantInt>();
}

/// \brief Match an arbitrary undef constant.
inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }

/// \brief Match an arbitrary Constant and ignore it.
inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
92
/// Matching combinators
/// \brief Disjunction of two sub-patterns: succeeds if either matches.
/// The left pattern is tried first; on its success the right one is never
/// evaluated (so only the left pattern's captures are bound).
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    if (L.match(V))
      return true;
    if (R.match(V))
      return true;
    return false;
  }
};

/// \brief Conjunction of two sub-patterns: succeeds only if both match the
/// same value (the right pattern is evaluated only after the left succeeds).
template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    if (L.match(V))
      if (R.match(V))
        return true;
    return false;
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}
134
/// \brief Matcher for the zero/null constant (ConstantInt 0, zeroinitializer,
/// ConstantPointerNull, ...); non-constants never match.
struct match_zero {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isNullValue();
    return false;
  }
};

/// \brief Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers.
inline match_zero m_Zero() { return match_zero(); }

/// \brief Matcher for negative-zero-valued constants (uses
/// Constant::isNegativeZeroValue, so FP -0.0 matches but +0.0 does not).
struct match_neg_zero {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isNegativeZeroValue();
    return false;
  }
};

/// \brief Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers. For
/// floating point constants, this will match negative zero but not positive
/// zero
inline match_neg_zero m_NegZero() { return match_neg_zero(); }

/// \brief Matcher for any zero-valued constant (uses Constant::isZeroValue,
/// so both FP +0.0 and -0.0 match).
struct match_any_zero {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isZeroValue();
    return false;
  }
};

/// \brief Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers. For
/// floating point constants, this will match negative zero and positive zero
inline match_any_zero m_AnyZero() { return match_any_zero(); }

/// \brief Matcher for floating-point NaN constants (scalar ConstantFP only).
struct match_nan {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<ConstantFP>(V))
      return C->isNaN();
    return false;
  }
};

/// Match an arbitrary NaN constant. This includes quiet and signalling nans.
inline match_nan m_NaN() { return match_nan(); }

/// \brief Matcher for constants equal to one (Constant::isOneValue).
struct match_one {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isOneValue();
    return false;
  }
};

/// \brief Match an integer 1 or a vector with all elements equal to 1.
inline match_one m_One() { return match_one(); }

/// \brief Matcher for all-ones constants (Constant::isAllOnesValue).
struct match_all_ones {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isAllOnesValue();
    return false;
  }
};

/// \brief Match an integer or vector with all bits set to true.
inline match_all_ones m_AllOnes() { return match_all_ones(); }

/// \brief Matcher for the minimum signed value (i.e. only the sign bit set).
struct match_sign_mask {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isMinSignedValue();
    return false;
  }
};

/// \brief Match an integer or vector with only the sign bit(s) set.
inline match_sign_mask m_SignMask() { return match_sign_mask(); }
217
/// \brief Matcher that binds a reference to the APInt of a ConstantInt, or
/// of the splat value of a constant vector. The bound pointer refers into
/// the matched constant, so it stays valid as long as the constant does.
struct apint_match {
  const APInt *&Res;

  apint_match(const APInt *&R) : Res(R) {}

  template <typename ITy> bool match(ITy *V) {
    if (auto *CI = dyn_cast<ConstantInt>(V)) {
      Res = &CI->getValue();
      return true;
    }
    // Vector case: only splat constants match; getSplatValue() may return
    // null, hence the dyn_cast_or_null.
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
          Res = &CI->getValue();
          return true;
        }
    return false;
  }
};
// Either constexpr if or renaming ConstantFP::getValueAPF to
// ConstantFP::getValue is needed to do it via single template
// function for both apint/apfloat.
/// \brief Floating-point analogue of apint_match: binds the APFloat of a
/// ConstantFP or of a splat constant vector.
struct apfloat_match {
  const APFloat *&Res;
  apfloat_match(const APFloat *&R) : Res(R) {}
  template <typename ITy> bool match(ITy *V) {
    if (auto *CI = dyn_cast<ConstantFP>(V)) {
      Res = &CI->getValueAPF();
      return true;
    }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) {
          Res = &CI->getValueAPF();
          return true;
        }
    return false;
  }
};

/// \brief Match a ConstantInt or splatted ConstantVector, binding the
/// specified pointer to the contained APInt.
inline apint_match m_APInt(const APInt *&Res) { return Res; }
// Analyzer trace (steps 18-19, interleaved by scan-build): the 'apint_match'
// constructor above is entered and returned from while building the m_APInt
// matcher; Res is a reference that is only written when a match succeeds.

/// \brief Match a ConstantFP or splatted ConstantVector, binding the
/// specified pointer to the contained APFloat.
/// The bound pointer refers into the matched constant.
inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; }
265
/// \brief Matcher for a ConstantInt whose value equals the compile-time
/// constant \p Val, regardless of the constant's bit width.
template <int64_t Val> struct constantint_match {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      const APInt &CIV = CI->getValue();
      if (Val >= 0)
        return CIV == static_cast<uint64_t>(Val);
      // If Val is negative, and CI is shorter than it, truncate to the right
      // number of bits. If it is larger, then we have to sign extend. Just
      // compare their negated values.
      return -CIV == -Val;
    }
    return false;
  }
};

/// \brief Match a ConstantInt with a specific value.
template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
  return constantint_match<Val>();
}
285
/// \brief This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate.
/// The Predicate base class must provide isValue(const APInt &).
template <typename Predicate> struct cst_pred_ty : public Predicate {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V))
      return this->isValue(CI->getValue());
    // Vector constants match through their splat value, if any.
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
          return this->isValue(CI->getValue());
    return false;
  }
};

/// \brief This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate, and bind them to an APInt.
template <typename Predicate> struct api_pred_ty : public Predicate {
  const APInt *&Res;

  api_pred_ty(const APInt *&R) : Res(R) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V))
      if (this->isValue(CI->getValue())) {
        Res = &CI->getValue();
        return true;
      }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
          if (this->isValue(CI->getValue())) {
            Res = &CI->getValue();
            return true;
          }

    return false;
  }
};

/// \brief Predicate: the constant is a power of two.
struct is_power2 {
  bool isValue(const APInt &C) { return C.isPowerOf2(); }
};

/// \brief Match an integer or vector power of 2.
inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }

/// \brief Predicate: the constant is the maximum signed value for its width.
struct is_maxsignedvalue {
  bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
};

inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() { return cst_pred_ty<is_maxsignedvalue>(); }
inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) { return V; }
339
/// \brief Matcher that succeeds if the value is an instance of \p Class and,
/// on success, binds a caller-provided pointer to it.
template <typename Class> struct bind_ty {
  Class *&VR;

  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};

/// \brief Match a value, capturing it if we match.
inline bind_ty<Value> m_Value(Value *&V) { return V; }
inline bind_ty<const Value> m_Value(const Value *&V) { return V; }

/// \brief Match an instruction, capturing it if we match.
inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
/// \brief Match a binary operator, capturing it if we match.
inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }

/// \brief Match a ConstantInt, capturing the value if we match.
inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }

/// \brief Match a Constant, capturing the value if we match.
inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }

/// \brief Match a ConstantFP, capturing the value if we match.
inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
371
/// \brief Match a specified Value*.
/// Succeeds only on pointer identity with the stored value.
struct specificval_ty {
  const Value *Val;

  specificval_ty(const Value *V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) { return V == Val; }
};

/// \brief Match if we have a specific specified value.
inline specificval_ty m_Specific(const Value *V) { return V; }

/// \brief Match a specified floating point value or vector of all elements of
/// that value.
struct specific_fpval {
  double Val;

  specific_fpval(double V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CFP = dyn_cast<ConstantFP>(V))
      return CFP->isExactlyValue(Val);
    // Vector case: compare against the splat value, if there is one.
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
          return CFP->isExactlyValue(Val);
    return false;
  }
};

/// \brief Match a specific floating point value or vector with all elements
/// equal to the value.
inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }

/// \brief Match a float 1.0 or vector with all elements equal to 1.0.
inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
408
/// \brief Matcher that binds the zero-extended value of a ConstantInt to a
/// caller-provided uint64_t; constants wider than 64 bits do not match.
struct bind_const_intval_ty {
  uint64_t &VR;

  bind_const_intval_ty(uint64_t &V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CV = dyn_cast<ConstantInt>(V))
      // Only bind when the value fits in 64 bits.
      if (CV->getValue().ule(UINT64_MAX)) {
        VR = CV->getZExtValue();
        return true;
      }
    return false;
  }
};

/// \brief Match a specified integer value or vector of all elements of that
// value.
struct specific_intval {
  uint64_t Val;

  specific_intval(uint64_t V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) {
    const auto *CI = dyn_cast<ConstantInt>(V);
    // Fall back to the splat value for constant vectors.
    if (!CI && V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());

    return CI && CI->getValue() == Val;
  }
};

/// \brief Match a specific integer value or vector with all elements equal to
/// the value.
inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }

/// \brief Match a ConstantInt and bind to its value. This does not match
/// ConstantInts wider than 64-bits.
inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
448
449//===----------------------------------------------------------------------===//
450// Matcher for any binary operator.
451//
/// \brief Matcher for any BinaryOperator whose operands match L and R; when
/// \p Commutable is true the operands are also tried in swapped order.
template <typename LHS_t, typename RHS_t, bool Commutable = false>
struct AnyBinaryOp_match {
  LHS_t L;
  RHS_t R;

  AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<BinaryOperator>(V))
      return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
             (Commutable && R.match(I->getOperand(0)) &&
              L.match(I->getOperand(1)));
    return false;
  }
};

/// \brief Match any binary operator with operands matching L and R.
template <typename LHS, typename RHS>
inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
  return AnyBinaryOp_match<LHS, RHS>(L, R);
}
472
473//===----------------------------------------------------------------------===//
474// Matchers for specific binary operators.
475//
476
/// \brief Matcher for a binary operation with a specific \p Opcode whose
/// operands match L and R. Matches both BinaryOperator instructions and
/// constant expressions with that opcode; when \p Commutable is true the
/// operands are also tried in swapped order.
template <typename LHS_t, typename RHS_t, unsigned Opcode,
          bool Commutable = false>
struct BinaryOp_match {
  LHS_t L;
  RHS_t R;

  BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Compare value IDs directly: cheaper than dyn_cast + getOpcode.
    if (V->getValueID() == Value::InstructionVal + Opcode) {
      auto *I = cast<BinaryOperator>(V);
      return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
             (Commutable && R.match(I->getOperand(0)) &&
              L.match(I->getOperand(1)));
    }
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      return CE->getOpcode() == Opcode &&
             ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
              (Commutable && R.match(CE->getOperand(0)) &&
               L.match(CE->getOperand(1))));
    return false;
  }
};

// One factory per IR binary opcode; each returns a non-commutable
// BinaryOp_match for that opcode.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
                                                      const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
}
608
/// \brief Matcher for an overflowing binary operation with a specific
/// \p Opcode, carrying all of the no-wrap flags requested in \p WrapFlags
/// (a mask of OverflowingBinaryOperator::NoUnsignedWrap/NoSignedWrap),
/// whose operands match L and R. Operands are not tried commuted.
template <typename LHS_t, typename RHS_t, unsigned Opcode,
          unsigned WrapFlags = 0>
struct OverflowingBinaryOp_match {
  LHS_t L;
  RHS_t R;

  OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
      : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
      if (Op->getOpcode() != Opcode)
        return false;
      // Every requested wrap flag must be present on the operation.
      if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
          !Op->hasNoUnsignedWrap())
        return false;
      if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
          !Op->hasNoSignedWrap())
        return false;
      return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
    }
    return false;
  }
};

/// \brief Match an 'add nsw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
/// \brief Match a 'sub nsw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
/// \brief Match a 'mul nsw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
/// \brief Match a 'shl nsw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}

/// \brief Match an 'add nuw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
/// \brief Match a 'sub nuw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
/// \brief Match a 'mul nuw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
/// \brief Match a 'shl nuw' with operands matching L and R.
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
699
700//===----------------------------------------------------------------------===//
701// Class that matches a group of binary opcodes.
702//
703template <typename LHS_t, typename RHS_t, typename Predicate>
704struct BinOpPred_match : Predicate {
705 LHS_t L;
706 RHS_t R;
707
708 BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
709
710 template <typename OpTy> bool match(OpTy *V) {
711 if (auto *I = dyn_cast<Instruction>(V))
712 return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
713 R.match(I->getOperand(1));
714 if (auto *CE = dyn_cast<ConstantExpr>(V))
715 return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
716 R.match(CE->getOperand(1));
717 return false;
718 }
719};
720
/// Opcode predicate for BinOpPred_match: any shift (shl, lshr, ashr).
struct is_shift_op {
  bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
};

/// Opcode predicate for BinOpPred_match: right shifts only (lshr, ashr).
struct is_right_shift_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
  }
};

/// Opcode predicate for BinOpPred_match: logical shifts only (shl, lshr).
struct is_logical_shift_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
  }
};

/// Opcode predicate for BinOpPred_match: bitwise logic (and, or, xor).
struct is_bitwiselogic_op {
  bool isOpType(unsigned Opcode) {
    return Instruction::isBitwiseLogicOp(Opcode);
  }
};

/// Opcode predicate for BinOpPred_match: integer division (sdiv, udiv).
struct is_idiv_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
  }
};
748
/// \brief Matches shift operations (shl, lshr, ashr).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
                                                      const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
}
755
/// \brief Matches right shift operations (lshr, ashr).
/// (The previous comment said "logical shift", which described
/// m_LogicalShift, not this matcher — the predicate is is_right_shift_op.)
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
                                                          const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
}
762
/// \brief Matches logical shift operations (shl, lshr).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
m_LogicalShift(const LHS &L, const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
}
769
/// \brief Matches bitwise logic operations (and, or, xor).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
m_BitwiseLogic(const LHS &L, const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
}

/// \brief Matches integer division operations (sdiv, udiv).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
                                                    const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
}
783
//===----------------------------------------------------------------------===//
// Class that matches exact binary ops.
//
/// \brief Wraps \p SubPattern and additionally requires that the matched
/// value is a PossiblyExactOperator with the 'exact' flag set.
template <typename SubPattern_t> struct Exact_match {
  SubPattern_t SubPattern;

  Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Check the flag first, then delegate operand matching to SubPattern.
    if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
      return PEO->isExact() && SubPattern.match(V);
    return false;
  }
};
798
799template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
800 return SubPattern;
801}
802
//===----------------------------------------------------------------------===//
// Matchers for CmpInst classes
//

/// \brief Matches a compare instruction of type \p Class whose operands match
/// \p L / \p R (optionally in either order when \p Commutable is true).
/// On success the instruction's predicate is written through \p Predicate.
///
/// NOTE(review): Predicate is stored BY REFERENCE — the matcher must not
/// outlive the predicate variable it was built from, or match() writes
/// through a dangling reference (this is the class of lifetime bug static
/// analyzers report at use sites).
template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
          bool Commutable = false>
struct CmpClass_match {
  PredicateTy &Predicate;
  LHS_t L;
  RHS_t R;

  CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
      : Predicate(Pred), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<Class>(V))
      // Try the given order first; fall back to the swapped order only when
      // commutable matching was requested. The predicate itself is NOT
      // swapped for the commuted case.
      if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
          (Commutable && R.match(I->getOperand(0)) &&
           L.match(I->getOperand(1)))) {
        Predicate = I->getPredicate();
        return true;
      }
    return false;
  }
};
828
/// \brief Matches any CmpInst (integer or floating point), capturing the
/// predicate into \p Pred.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
}

/// \brief Matches an ICmpInst, capturing the predicate into \p Pred.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
}

/// \brief Matches an FCmpInst, capturing the predicate into \p Pred.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
}
846
//===----------------------------------------------------------------------===//
// Matchers for SelectInst classes
//

/// \brief Matches a SelectInst whose condition matches \p C, true value
/// matches \p L, and false value matches \p R.
template <typename Cond_t, typename LHS_t, typename RHS_t>
struct SelectClass_match {
  Cond_t C;
  LHS_t L;
  RHS_t R;

  SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
      : C(Cond), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Operand order of select: 0 = condition, 1 = true value, 2 = false value.
    if (auto *I = dyn_cast<SelectInst>(V))
      return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
             R.match(I->getOperand(2));
    return false;
  }
};
867
/// \brief Matches a select instruction: select C, L, R.
template <typename Cond, typename LHS, typename RHS>
inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
                                                  const RHS &R) {
  return SelectClass_match<Cond, LHS, RHS>(C, L, R);
}

/// \brief This matches a select of two constants, e.g.:
/// m_SelectCst<-1, 0>(m_Value(V))
template <int64_t L, int64_t R, typename Cond>
inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
m_SelectCst(const Cond &C) {
  return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
}
881
//===----------------------------------------------------------------------===//
// Matchers for CastInst classes
//

/// \brief Matches an Operator with the given cast \p Opcode whose single
/// source operand matches \p Op. Using Operator (rather than CastInst) lets
/// this also match constant-expression casts.
template <typename Op_t, unsigned Opcode> struct CastClass_match {
  Op_t Op;

  CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
    return false;
  }
};
897
/// \brief Matches BitCast.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::BitCast>(Op);
}

/// \brief Matches PtrToInt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
}

/// \brief Matches Trunc.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::Trunc>(Op);
}

/// \brief Matches SExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SExt>(Op);
}

/// \brief Matches ZExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::ZExt>(Op);
}

/// \brief Matches either a ZExt or an SExt of the same source pattern.
template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
                        CastClass_match<OpTy, Instruction::SExt>>
m_ZExtOrSExt(const OpTy &Op) {
  return m_CombineOr(m_ZExt(Op), m_SExt(Op));
}

/// \brief Matches UIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::UIToFP>(Op);
}

/// \brief Matches SIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SIToFP>(Op);
}

/// \brief Matches FPTrunc.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
}

/// \brief Matches FPExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPExt>(Op);
}
958
//===----------------------------------------------------------------------===//
// Matcher for LoadInst classes
//

/// \brief Matches a LoadInst whose pointer operand matches \p Op.
template <typename Op_t> struct LoadClass_match {
  Op_t Op;

  LoadClass_match(const Op_t &OpMatch) : Op(OpMatch) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *LI = dyn_cast<LoadInst>(V))
      return Op.match(LI->getPointerOperand());
    return false;
  }
};

/// Matches LoadInst.
template <typename OpTy> inline LoadClass_match<OpTy> m_Load(const OpTy &Op) {
  return LoadClass_match<OpTy>(Op);
}
//===----------------------------------------------------------------------===//
// Matchers for unary operators
//

/// \brief Matches bitwise-not: xor X, -1 (or xor -1, X), where X matches
/// \p L. The all-ones constant may appear on either side.
template <typename LHS_t> struct not_match {
  LHS_t L;

  not_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::Xor) {
        if (isAllOnes(O->getOperand(1)))
          return L.match(O->getOperand(0));
        if (isAllOnes(O->getOperand(0)))
          return L.match(O->getOperand(1));
      }
    return false;
  }

private:
  // True iff V is a constant with every bit set (scalar or splat vector).
  bool isAllOnes(Value *V) {
    return isa<Constant>(V) && cast<Constant>(V)->isAllOnesValue();
  }
};

/// \brief Match a bitwise-not (xor with all-ones).
template <typename LHS> inline not_match<LHS> m_Not(const LHS &L) { return L; }
1006
/// \brief Matches integer negation: sub 0, X, where X matches \p L. The LHS
/// must be a literal zero (ConstantInt zero or ConstantAggregateZero).
template <typename LHS_t> struct neg_match {
  LHS_t L;

  neg_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::Sub)
        return matchIfNeg(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  // Accepts scalar zero or a zero vector/aggregate on the LHS of the sub.
  bool matchIfNeg(Value *LHS, Value *RHS) {
    return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
            isa<ConstantAggregateZero>(LHS)) &&
           L.match(RHS);
  }
};

/// \brief Match an integer negate.
template <typename LHS> inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
1029
/// \brief Matches floating-point negation: fsub -0.0, X, where X matches
/// \p L. The LHS must be exactly negative zero (not +0.0).
template <typename LHS_t> struct fneg_match {
  LHS_t L;

  fneg_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::FSub)
        return matchIfFNeg(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  // -0.0 is required because fsub +0.0, X is not a negation for X == -0.0.
  bool matchIfFNeg(Value *LHS, Value *RHS) {
    if (const auto *C = dyn_cast<ConstantFP>(LHS))
      return C->isNegativeZeroValue() && L.match(RHS);
    return false;
  }
};

/// \brief Match a floating point negate.
template <typename LHS> inline fneg_match<LHS> m_FNeg(const LHS &L) {
  return L;
}
1054
//===----------------------------------------------------------------------===//
// Matchers for control flow.
//

/// \brief Matches an unconditional BranchInst; on success the single
/// successor block is written through \p Succ.
struct br_match {
  BasicBlock *&Succ;

  br_match(BasicBlock *&Succ) : Succ(Succ) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isUnconditional()) {
        Succ = BI->getSuccessor(0);
        return true;
      }
    return false;
  }
};

/// \brief Match an unconditional branch, capturing its successor.
inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
1075
/// \brief Matches a conditional BranchInst whose condition matches \p Cond;
/// on success the taken/not-taken successors are written through \p T / \p F.
template <typename Cond_t> struct brc_match {
  Cond_t Cond;
  BasicBlock *&T, *&F;

  brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
      : Cond(C), T(t), F(f) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isConditional() && Cond.match(BI->getCondition())) {
        T = BI->getSuccessor(0); // successor taken when the condition is true
        F = BI->getSuccessor(1); // successor taken when the condition is false
        return true;
      }
    return false;
  }
};

/// \brief Match a conditional branch, capturing both successors.
template <typename Cond_t>
inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
  return brc_match<Cond_t>(C, T, F);
}
1098
//===----------------------------------------------------------------------===//
// Matchers for max/min idioms, eg: "select (sgt x, y), x, y" -> smax(x,y).
//

/// \brief Matches a select-of-compare that forms a max/min idiom. \p Pred_t
/// decides which compare predicates count as the desired max/min; the select
/// must return exactly the two values being compared (in either order — the
/// predicate is inverted when the select arms are swapped relative to the
/// compare operands).
template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
          bool Commutable = false>
struct MaxMin_match {
  LHS_t L;
  RHS_t R;

  MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
    auto *SI = dyn_cast<SelectInst>(V);
    if (!SI)
      return false;
    auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
    if (!Cmp)
      return false;
    // At this point we have a select conditioned on a comparison. Check that
    // it is the values returned by the select that are being compared.
    Value *TrueVal = SI->getTrueValue();
    Value *FalseVal = SI->getFalseValue();
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    if ((TrueVal != LHS || FalseVal != RHS) &&
        (TrueVal != RHS || FalseVal != LHS))
      return false;
    // If the select arms are swapped w.r.t. the compare, inverting the
    // predicate normalizes back to "(x pred y) ? x : y" form.
    typename CmpInst_t::Predicate Pred =
        LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
    // Does "(x pred y) ? x : y" represent the desired max/min operation?
    if (!Pred_t::match(Pred))
      return false;
    // It does! Bind the operands.
    return (L.match(LHS) && R.match(RHS)) ||
           (Commutable && R.match(LHS) && L.match(RHS));
  }
};
1138
/// \brief Helper class for identifying signed max predicates (sgt/sge).
struct smax_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
  }
};

/// \brief Helper class for identifying signed min predicates (slt/sle).
struct smin_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
  }
};

/// \brief Helper class for identifying unsigned max predicates (ugt/uge).
struct umax_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
  }
};

/// \brief Helper class for identifying unsigned min predicates (ult/ule).
struct umin_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
  }
};

/// \brief Helper class for identifying ordered max predicates (ogt/oge).
struct ofmax_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
  }
};

/// \brief Helper class for identifying ordered min predicates (olt/ole).
struct ofmin_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
  }
};

/// \brief Helper class for identifying unordered max predicates (ugt/uge).
struct ufmax_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
  }
};

/// \brief Helper class for identifying unordered min predicates (ult/ule).
struct ufmin_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
  }
};
1194
/// \brief Matches a signed maximum idiom: select (sgt/sge L, R), L, R.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
}

/// \brief Matches a signed minimum idiom: select (slt/sle L, R), L, R.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
}

/// \brief Matches an unsigned maximum idiom: select (ugt/uge L, R), L, R.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
}

/// \brief Matches an unsigned minimum idiom: select (ult/ule L, R), L, R.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
}
1218
/// \brief Match an 'ordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ogt/oge, L, R), L, R) semantics matched by this predicate.
///
/// max(L, R) iff L and R are not NaN
/// m_OrdFMax(L, R) = R iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
}

/// \brief Match an 'ordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(olt/ole, L, R), L, R) semantics matched by this predicate.
///
/// min(L, R) iff L and R are not NaN
/// m_OrdFMin(L, R) = R iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
}

/// \brief Match an 'unordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ugt/uge, L, R), L, R) semantics matched by this predicate.
///
/// max(L, R) iff L and R are not NaN
/// m_UnordFMax(L, R) = L iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
m_UnordFMax(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
}

/// \brief Match an 'unordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ult/ule, L, R), L, R) semantics matched by this predicate.
///
/// min(L, R) iff L and R are not NaN
/// m_UnordFMin(L, R) = L iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
m_UnordFMin(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
}
1278
//===----------------------------------------------------------------------===//
// Matchers for overflow check patterns: e.g. (a + b) u< a
//

/// \brief Matches the canonical unsigned-add overflow check:
/// "(a + b) u< a", "(a + b) u< b", "a u> (a + b)" or "b u> (a + b)".
/// On success \p L / \p R bind to the add's operands and \p S binds to the
/// add itself.
template <typename LHS_t, typename RHS_t, typename Sum_t>
struct UAddWithOverflow_match {
  LHS_t L;
  RHS_t R;
  Sum_t S;

  UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
      : L(L), R(R), S(S) {}

  template <typename OpTy> bool match(OpTy *V) {
    Value *ICmpLHS, *ICmpRHS;
    ICmpInst::Predicate Pred;
    if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
      return false;

    Value *AddLHS, *AddRHS;
    auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));

    // (a + b) u< a, (a + b) u< b
    if (Pred == ICmpInst::ICMP_ULT)
      if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);

    // a >u (a + b), b >u (a + b)
    if (Pred == ICmpInst::ICMP_UGT)
      if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);

    return false;
  }
};

/// \brief Match an icmp instruction checking for unsigned overflow on addition.
///
/// S is matched to the addition whose result is being checked for overflow, and
/// L and R are matched to the LHS and RHS of S.
template <typename LHS_t, typename RHS_t, typename Sum_t>
UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
  return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
}
1324
/// \brief Matches a call whose argument at index \p OpI matches \p Val.
/// NOTE(review): CS.isCall() restricts this to CallInst — invokes do not
/// match; confirm callers rely on that.
template <typename Opnd_t> struct Argument_match {
  unsigned OpI;
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    CallSite CS(V);
    return CS.isCall() && Val.match(CS.getArgument(OpI));
  }
};

/// \brief Match an argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}
1342
/// \brief Intrinsic matchers.
/// Matches a direct call to the intrinsic with the given ID; indirect calls
/// (no called function) never match.
struct IntrinsicID_match {
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (const auto *CI = dyn_cast<CallInst>(V))
      if (const auto *F = CI->getCalledFunction())
        return F->getIntrinsicID() == ID;
    return false;
  }
};
1356
/// Intrinsic matches are combinations of ID matchers, and argument
/// matchers. Higher arity matcher are defined recursively in terms of and-ing
/// them with lower arity matchers. Here's some convenient typedefs for up to
/// several arguments, and more can be added as needed
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void, typename T4 = void, typename T5 = void,
          typename T6 = void, typename T7 = void, typename T8 = void,
          typename T9 = void, typename T10 = void>
struct m_Intrinsic_Ty;
/// One-argument intrinsic matcher type: ID check AND-ed with argument 0.
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
/// Two-argument matcher: the one-argument matcher AND-ed with argument 1.
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
/// Three-argument matcher, built the same recursive way.
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                        Argument_match<T2>>;
};
/// Four-argument matcher, built the same recursive way.
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty<T0, T1, T2, T3> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                        Argument_match<T3>>;
};
1385
/// \brief Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

/// One-argument form: also matches argument 0 against \p Op0.
template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

/// Two-argument form, built recursively from the one-argument form.
template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

/// Three-argument form.
template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

/// Four-argument form.
template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}
1415
// Helper intrinsic matching specializations.
/// Matches llvm.bitreverse(Op0).
template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::bitreverse>(Op0);
}

/// Matches llvm.bswap(Op0).
template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::bswap>(Op0);
}

/// Matches llvm.minnum(Op0, Op1).
template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
                                                        const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
}

/// Matches llvm.maxnum(Op0, Op1).
template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
                                                        const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
}
1438
/// \brief Matches the expansion of signum: (x >>s BW-1) | (-x >>u BW-1),
/// where both shifted values are the same x, which then matches \p Val.
template <typename Opnd_t> struct Signum_match {
  Opnd_t Val;
  Signum_match(const Opnd_t &V) : Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    unsigned TypeSize = V->getType()->getScalarSizeInBits();
    // Non-integer scalar types report a size of 0; they can't be a signum.
    if (TypeSize == 0)
      return false;

    unsigned ShiftWidth = TypeSize - 1;
    Value *OpL = nullptr, *OpR = nullptr;

    // This is the representation of signum we match:
    //
    //  signum(x) == (x >> 63) | (-x >>u 63)
    //
    // An i1 value is its own signum, so it's correct to match
    //
    //  signum(x) == (x >> 0) | (-x >>u 0)
    //
    // for i1 values.

    auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
    auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
    auto Signum = m_Or(LHS, RHS);

    // Both shifts must see the same operand for this to be signum(x).
    return Signum.match(V) && OpL == OpR && Val.match(OpL);
  }
};

/// \brief Matches a signum pattern.
///
/// signum(x) =
///      x >  0  ->  1
///      x == 0  ->  0
///      x <  0  -> -1
template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
  return Signum_match<Val_t>(V);
}
1478
//===----------------------------------------------------------------------===//
// Matchers for two-operands operators with the operators in either order
//

/// \brief Matches a BinaryOperator with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
  return AnyBinaryOp_match<LHS, RHS, true>(L, R);
}

/// \brief Matches an ICmp with a predicate over LHS and RHS in either order.
/// Does not swap the predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
                                                                       R);
}

/// \brief Matches a Add with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
}

/// \brief Matches a Mul with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
}

/// \brief Matches an And with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
}

/// \brief Matches an Or with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
                                                              const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
}

/// \brief Matches an Xor with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
}

/// Matches an SMin with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
m_c_SMin(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
}
/// Matches an SMax with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
m_c_SMax(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
}
/// Matches a UMin with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
m_c_UMin(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
}
/// Matches a UMax with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
m_c_UMax(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
}
1557
1558} // end namespace PatternMatch
1559} // end namespace llvm
1560
1561#endif // LLVM_IR_PATTERNMATCH_H