Bug Summary

File: lib/Analysis/InstructionSimplify.cpp
Warning: line 413, column 20
Called C++ object pointer is null
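
The analyzer's path ends inside ThreadCmpOverSelect (source lines 402-413 in the listing below): at step 26 the value returned by SelectInst::getCondition() is assumed to be null, so the Cond->getType() call at line 413 dereferences a null pointer. The fragment below restates that path and adds a purely illustrative early-out guard; it is an editor's sketch of one way the dereference could be avoided, not the upstream fix, and it assumes a null condition can simply be treated as "no simplification".

  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();    // step 24: 'Cond' initialized here
  if (!Cond)                           // hypothetical guard, not present in the original code
    return nullptr;                    // bail out rather than dereference a null Cond below
  Value *TV = SI->getTrueValue();
  Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
  if (TCmp == Cond)                    // step 26: the analyzer assumes Cond is null here
    TCmp = getTrue(Cond->getType());   // line 413: safe once Cond is known to be non-null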

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InstructionSimplify.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Analysis -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/InstructionSimplify.cpp -faddrsig

/build/llvm-toolchain-snapshot-7~svn338205/lib/Analysis/InstructionSimplify.cpp

1//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements routines for folding instructions into simpler forms
11// that do not require creating new instructions. This does constant folding
12// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
13// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
14// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
15// simplified: This is usually true and assuming it simplifies the logic (if
16// they have not been simplified then results are correct but maybe suboptimal).
17//
18//===----------------------------------------------------------------------===//
19
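
As a concrete illustration of the behaviour described in the header comment, the public wrappers defined later in this file (for example llvm::SimplifyAddInst) return an existing simpler value or null, and never create new instructions. The snippet below is an editor's sketch, not part of the original file; it assumes X is an existing i32 value and that SimplifyQuery can be constructed from a DataLayout as declared in InstructionSimplify.h.

// Sketch: fold "add i32 %x, 0" back to %x without building any new IR.
static Value *foldAddZeroExample(Value *X, const DataLayout &DL) {
  LLVMContext &Ctx = X->getContext();
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Ctx), 0); // assumes X has type i32
  // Returns X ("X + 0 -> X") or null if no simplification applies.
  return llvm::SimplifyAddInst(X, Zero, /*IsNSW=*/false, /*IsNUW=*/false,
                               SimplifyQuery(DL));
}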
20#include "llvm/Analysis/InstructionSimplify.h"
21#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/Analysis/AssumptionCache.h"
25#include "llvm/Analysis/CaptureTracking.h"
26#include "llvm/Analysis/CmpInstAnalysis.h"
27#include "llvm/Analysis/ConstantFolding.h"
28#include "llvm/Analysis/LoopAnalysisManager.h"
29#include "llvm/Analysis/MemoryBuiltins.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/Analysis/VectorUtils.h"
32#include "llvm/IR/ConstantRange.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/Dominators.h"
35#include "llvm/IR/GetElementPtrTypeIterator.h"
36#include "llvm/IR/GlobalAlias.h"
37#include "llvm/IR/Operator.h"
38#include "llvm/IR/PatternMatch.h"
39#include "llvm/IR/ValueHandle.h"
40#include "llvm/Support/KnownBits.h"
41#include <algorithm>
42using namespace llvm;
43using namespace llvm::PatternMatch;
44
45#define DEBUG_TYPE "instsimplify"
46
47enum { RecursionLimit = 3 };
48
49STATISTIC(NumExpand, "Number of expansions");
50STATISTIC(NumReassoc, "Number of reassociations");
51
52static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
53static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
54 unsigned);
55static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
56 const SimplifyQuery &, unsigned);
57static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
58 unsigned);
59static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
60 const SimplifyQuery &Q, unsigned MaxRecurse);
61static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
62static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
63static Value *SimplifyCastInst(unsigned, Value *, Type *,
64 const SimplifyQuery &, unsigned);
65static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
66 unsigned);
67
68/// Fold
69/// %A = icmp ne/eq i8 %X, %V1
70/// %B = icmp ne/eq i8 %X, %V2
71/// %C = or/and i1 %A, %B
72/// %D = select i1 %C, i8 %X, i8 %V1
73/// To
74/// %X/%V1
75static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
76 Value *FalseVal) {
77 BinaryOperator::BinaryOps BinOpCode;
78 if (auto *BO = dyn_cast<BinaryOperator>(Cond))
79 BinOpCode = BO->getOpcode();
80 else
81 return nullptr;
82
83 CmpInst::Predicate ExpectedPred;
84 if (BinOpCode == BinaryOperator::Or) {
85 ExpectedPred = ICmpInst::ICMP_NE;
86 } else if (BinOpCode == BinaryOperator::And) {
87 ExpectedPred = ICmpInst::ICMP_EQ;
88 } else
89 return nullptr;
90
91 CmpInst::Predicate Pred1, Pred2;
92 if (!match(
93 Cond,
94 m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal), m_Specific(FalseVal)),
95 m_c_ICmp(Pred2, m_Specific(TrueVal), m_Value()))) ||
96 Pred1 != Pred2 || Pred1 != ExpectedPred)
97 return nullptr;
98
99 return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
100}
101
102/// For a boolean type or a vector of boolean type, return false or a vector
103/// with every element false.
104static Constant *getFalse(Type *Ty) {
105 return ConstantInt::getFalse(Ty);
106}
107
108/// For a boolean type or a vector of boolean type, return true or a vector
109/// with every element true.
110static Constant *getTrue(Type *Ty) {
111 return ConstantInt::getTrue(Ty);
112}
113
114/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
115static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
116 Value *RHS) {
117 CmpInst *Cmp = dyn_cast<CmpInst>(V);
118 if (!Cmp)
119 return false;
120 CmpInst::Predicate CPred = Cmp->getPredicate();
121 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
122 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
123 return true;
124 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
125 CRHS == LHS;
126}
127
128/// Does the given value dominate the specified phi node?
129static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
130 Instruction *I = dyn_cast<Instruction>(V);
131 if (!I)
132 // Arguments and constants dominate all instructions.
133 return true;
134
135 // If we are processing instructions (and/or basic blocks) that have not been
136 // fully added to a function, the parent nodes may still be null. Simply
137 // return the conservative answer in these cases.
138 if (!I->getParent() || !P->getParent() || !I->getFunction())
139 return false;
140
141 // If we have a DominatorTree then do a precise test.
142 if (DT)
143 return DT->dominates(I, P);
144
145 // Otherwise, if the instruction is in the entry block and is not an invoke,
146 // then it obviously dominates all phi nodes.
147 if (I->getParent() == &I->getFunction()->getEntryBlock() &&
148 !isa<InvokeInst>(I))
149 return true;
150
151 return false;
152}
153
154/// Simplify "A op (B op' C)" by distributing op over op', turning it into
155/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
156/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
157/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
158/// Returns the simplified value, or null if no simplification was performed.
159static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
160 Instruction::BinaryOps OpcodeToExpand,
161 const SimplifyQuery &Q, unsigned MaxRecurse) {
162 // Recursion is always used, so bail out at once if we already hit the limit.
163 if (!MaxRecurse--)
164 return nullptr;
165
166 // Check whether the expression has the form "(A op' B) op C".
167 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
168 if (Op0->getOpcode() == OpcodeToExpand) {
169 // It does! Try turning it into "(A op C) op' (B op C)".
170 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
171 // Do "A op C" and "B op C" both simplify?
172 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
173 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
174 // They do! Return "L op' R" if it simplifies or is already available.
175 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
176 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
177 && L == B && R == A)) {
178 ++NumExpand;
179 return LHS;
180 }
181 // Otherwise return "L op' R" if it simplifies.
182 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
183 ++NumExpand;
184 return V;
185 }
186 }
187 }
188
189 // Check whether the expression has the form "A op (B op' C)".
190 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
191 if (Op1->getOpcode() == OpcodeToExpand) {
192 // It does! Try turning it into "(A op B) op' (A op C)".
193 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
194 // Do "A op B" and "A op C" both simplify?
195 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
196 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
197 // They do! Return "L op' R" if it simplifies or is already available.
198 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
199 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
200 && L == C && R == B)) {
201 ++NumExpand;
202 return RHS;
203 }
204 // Otherwise return "L op' R" if it simplifies.
205 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
206 ++NumExpand;
207 return V;
208 }
209 }
210 }
211
212 return nullptr;
213}
214
215/// Generic simplifications for associative binary operations.
216/// Returns the simpler value, or null if none was found.
217static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
218 Value *LHS, Value *RHS,
219 const SimplifyQuery &Q,
220 unsigned MaxRecurse) {
221 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
222
223 // Recursion is always used, so bail out at once if we already hit the limit.
224 if (!MaxRecurse--)
225 return nullptr;
226
227 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
228 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
229
230 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
231 if (Op0 && Op0->getOpcode() == Opcode) {
232 Value *A = Op0->getOperand(0);
233 Value *B = Op0->getOperand(1);
234 Value *C = RHS;
235
236 // Does "B op C" simplify?
237 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
238 // It does! Return "A op V" if it simplifies or is already available.
239 // If V equals B then "A op V" is just the LHS.
240 if (V == B) return LHS;
241 // Otherwise return "A op V" if it simplifies.
242 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
243 ++NumReassoc;
244 return W;
245 }
246 }
247 }
248
249 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
250 if (Op1 && Op1->getOpcode() == Opcode) {
251 Value *A = LHS;
252 Value *B = Op1->getOperand(0);
253 Value *C = Op1->getOperand(1);
254
255 // Does "A op B" simplify?
256 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
257 // It does! Return "V op C" if it simplifies or is already available.
258 // If V equals B then "V op C" is just the RHS.
259 if (V == B) return RHS;
260 // Otherwise return "V op C" if it simplifies.
261 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
262 ++NumReassoc;
263 return W;
264 }
265 }
266 }
267
268 // The remaining transforms require commutativity as well as associativity.
269 if (!Instruction::isCommutative(Opcode))
270 return nullptr;
271
272 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
273 if (Op0 && Op0->getOpcode() == Opcode) {
274 Value *A = Op0->getOperand(0);
275 Value *B = Op0->getOperand(1);
276 Value *C = RHS;
277
278 // Does "C op A" simplify?
279 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
280 // It does! Return "V op B" if it simplifies or is already available.
281 // If V equals A then "V op B" is just the LHS.
282 if (V == A) return LHS;
283 // Otherwise return "V op B" if it simplifies.
284 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
285 ++NumReassoc;
286 return W;
287 }
288 }
289 }
290
291 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
292 if (Op1 && Op1->getOpcode() == Opcode) {
293 Value *A = LHS;
294 Value *B = Op1->getOperand(0);
295 Value *C = Op1->getOperand(1);
296
297 // Does "C op A" simplify?
298 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
299 // It does! Return "B op V" if it simplifies or is already available.
300 // If V equals C then "B op V" is just the RHS.
301 if (V == C) return RHS;
302 // Otherwise return "B op V" if it simplifies.
303 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
304 ++NumReassoc;
305 return W;
306 }
307 }
308 }
309
310 return nullptr;
311}
312
313/// In the case of a binary operation with a select instruction as an operand,
314/// try to simplify the binop by seeing whether evaluating it on both branches
315/// of the select results in the same value. Returns the common value if so,
316/// otherwise returns null.
317static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
318 Value *RHS, const SimplifyQuery &Q,
319 unsigned MaxRecurse) {
320 // Recursion is always used, so bail out at once if we already hit the limit.
321 if (!MaxRecurse--)
322 return nullptr;
323
324 SelectInst *SI;
325 if (isa<SelectInst>(LHS)) {
326 SI = cast<SelectInst>(LHS);
327 } else {
328 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
329 SI = cast<SelectInst>(RHS);
330 }
331
332 // Evaluate the BinOp on the true and false branches of the select.
333 Value *TV;
334 Value *FV;
335 if (SI == LHS) {
336 TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
337 FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
338 } else {
339 TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
340 FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
341 }
342
343 // If they simplified to the same value, then return the common value.
344 // If they both failed to simplify then return null.
345 if (TV == FV)
346 return TV;
347
348 // If one branch simplified to undef, return the other one.
349 if (TV && isa<UndefValue>(TV))
350 return FV;
351 if (FV && isa<UndefValue>(FV))
352 return TV;
353
354 // If applying the operation did not change the true and false select values,
355 // then the result of the binop is the select itself.
356 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
357 return SI;
358
359 // If one branch simplified and the other did not, and the simplified
360 // value is equal to the unsimplified one, return the simplified value.
361 // For example, select (cond, X, X & Z) & Z -> X & Z.
362 if ((FV && !TV) || (TV && !FV)) {
363 // Check that the simplified value has the form "X op Y" where "op" is the
364 // same as the original operation.
365 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
366 if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
367 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
368 // We already know that "op" is the same as for the simplified value. See
369 // if the operands match too. If so, return the simplified value.
370 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
371 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
372 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
373 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
374 Simplified->getOperand(1) == UnsimplifiedRHS)
375 return Simplified;
376 if (Simplified->isCommutative() &&
377 Simplified->getOperand(1) == UnsimplifiedLHS &&
378 Simplified->getOperand(0) == UnsimplifiedRHS)
379 return Simplified;
380 }
381 }
382
383 return nullptr;
384}
385
386/// In the case of a comparison with a select instruction, try to simplify the
387/// comparison by seeing whether both branches of the select result in the same
388/// value. Returns the common value if so, otherwise returns null.
389static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
390 Value *RHS, const SimplifyQuery &Q,
391 unsigned MaxRecurse) {
392 // Recursion is always used, so bail out at once if we already hit the limit.
393 if (!MaxRecurse--)
18. Taking false branch
394 return nullptr;
395
396 // Make sure the select is on the LHS.
397 if (!isa<SelectInst>(LHS)) {
19. Taking true branch
398 std::swap(LHS, RHS);
399 Pred = CmpInst::getSwappedPredicate(Pred);
400 }
401 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
402 SelectInst *SI = cast<SelectInst>(LHS);
403 Value *Cond = SI->getCondition();
20. Calling 'SelectInst::getCondition'
23. Returning from 'SelectInst::getCondition'
24. 'Cond' initialized here
404 Value *TV = SI->getTrueValue();
405 Value *FV = SI->getFalseValue();
406
407 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
408 // Does "cmp TV, RHS" simplify?
409 Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
410 if (TCmp == Cond) {
25. Assuming 'TCmp' is equal to 'Cond'
26. Assuming pointer value is null
27. Taking true branch
411 // It not only simplified, it simplified to the select condition. Replace
412 // it with 'true'.
413 TCmp = getTrue(Cond->getType());
28. Called C++ object pointer is null
414 } else if (!TCmp) {
415 // It didn't simplify. However if "cmp TV, RHS" is equal to the select
416 // condition then we can replace it with 'true'. Otherwise give up.
417 if (!isSameCompare(Cond, Pred, TV, RHS))
418 return nullptr;
419 TCmp = getTrue(Cond->getType());
420 }
421
422 // Does "cmp FV, RHS" simplify?
423 Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
424 if (FCmp == Cond) {
425 // It not only simplified, it simplified to the select condition. Replace
426 // it with 'false'.
427 FCmp = getFalse(Cond->getType());
428 } else if (!FCmp) {
429 // It didn't simplify. However if "cmp FV, RHS" is equal to the select
430 // condition then we can replace it with 'false'. Otherwise give up.
431 if (!isSameCompare(Cond, Pred, FV, RHS))
432 return nullptr;
433 FCmp = getFalse(Cond->getType());
434 }
435
436 // If both sides simplified to the same value, then use it as the result of
437 // the original comparison.
438 if (TCmp == FCmp)
439 return TCmp;
440
441 // The remaining cases only make sense if the select condition has the same
442 // type as the result of the comparison, so bail out if this is not so.
443 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
444 return nullptr;
445 // If the false value simplified to false, then the result of the compare
446 // is equal to "Cond && TCmp". This also catches the case when the false
447 // value simplified to false and the true value to true, returning "Cond".
448 if (match(FCmp, m_Zero()))
449 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
450 return V;
451 // If the true value simplified to true, then the result of the compare
452 // is equal to "Cond || FCmp".
453 if (match(TCmp, m_One()))
454 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
455 return V;
456 // Finally, if the false value simplified to true and the true value to
457 // false, then the result of the compare is equal to "!Cond".
458 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
459 if (Value *V =
460 SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
461 Q, MaxRecurse))
462 return V;
463
464 return nullptr;
465}
466
467/// In the case of a binary operation with an operand that is a PHI instruction,
468/// try to simplify the binop by seeing whether evaluating it on the incoming
469/// phi values yields the same result for every value. If so returns the common
470/// value, otherwise returns null.
471static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
472 Value *RHS, const SimplifyQuery &Q,
473 unsigned MaxRecurse) {
474 // Recursion is always used, so bail out at once if we already hit the limit.
475 if (!MaxRecurse--)
476 return nullptr;
477
478 PHINode *PI;
479 if (isa<PHINode>(LHS)) {
480 PI = cast<PHINode>(LHS);
481 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
482 if (!valueDominatesPHI(RHS, PI, Q.DT))
483 return nullptr;
484 } else {
485 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
486 PI = cast<PHINode>(RHS);
487 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
488 if (!valueDominatesPHI(LHS, PI, Q.DT))
489 return nullptr;
490 }
491
492 // Evaluate the BinOp on the incoming phi values.
493 Value *CommonValue = nullptr;
494 for (Value *Incoming : PI->incoming_values()) {
495 // If the incoming value is the phi node itself, it can safely be skipped.
496 if (Incoming == PI) continue;
497 Value *V = PI == LHS ?
498 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
499 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
500 // If the operation failed to simplify, or simplified to a different value
501 // to previously, then give up.
502 if (!V || (CommonValue && V != CommonValue))
503 return nullptr;
504 CommonValue = V;
505 }
506
507 return CommonValue;
508}
509
510/// In the case of a comparison with a PHI instruction, try to simplify the
511/// comparison by seeing whether comparing with all of the incoming phi values
512/// yields the same result every time. If so returns the common result,
513/// otherwise returns null.
514static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
515 const SimplifyQuery &Q, unsigned MaxRecurse) {
516 // Recursion is always used, so bail out at once if we already hit the limit.
517 if (!MaxRecurse--)
518 return nullptr;
519
520 // Make sure the phi is on the LHS.
521 if (!isa<PHINode>(LHS)) {
522 std::swap(LHS, RHS);
523 Pred = CmpInst::getSwappedPredicate(Pred);
524 }
525 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
526 PHINode *PI = cast<PHINode>(LHS);
527
528 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
529 if (!valueDominatesPHI(RHS, PI, Q.DT))
530 return nullptr;
531
532 // Evaluate the BinOp on the incoming phi values.
533 Value *CommonValue = nullptr;
534 for (Value *Incoming : PI->incoming_values()) {
535 // If the incoming value is the phi node itself, it can safely be skipped.
536 if (Incoming == PI) continue;
537 Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
538 // If the operation failed to simplify, or simplified to a different value
539 // to previously, then give up.
540 if (!V || (CommonValue && V != CommonValue))
541 return nullptr;
542 CommonValue = V;
543 }
544
545 return CommonValue;
546}
547
548static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
549 Value *&Op0, Value *&Op1,
550 const SimplifyQuery &Q) {
551 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
552 if (auto *CRHS = dyn_cast<Constant>(Op1))
553 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
554
555 // Canonicalize the constant to the RHS if this is a commutative operation.
556 if (Instruction::isCommutative(Opcode))
557 std::swap(Op0, Op1);
558 }
559 return nullptr;
560}
561
562/// Given operands for an Add, see if we can fold the result.
563/// If not, this returns null.
564static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
565 const SimplifyQuery &Q, unsigned MaxRecurse) {
566 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
567 return C;
568
569 // X + undef -> undef
570 if (match(Op1, m_Undef()))
571 return Op1;
572
573 // X + 0 -> X
574 if (match(Op1, m_Zero()))
575 return Op0;
576
577 // If two operands are negative, return 0.
578 if (isKnownNegation(Op0, Op1))
579 return Constant::getNullValue(Op0->getType());
580
581 // X + (Y - X) -> Y
582 // (Y - X) + X -> Y
583 // Eg: X + -X -> 0
584 Value *Y = nullptr;
585 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
586 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
587 return Y;
588
589 // X + ~X -> -1 since ~X = -X-1
590 Type *Ty = Op0->getType();
591 if (match(Op0, m_Not(m_Specific(Op1))) ||
592 match(Op1, m_Not(m_Specific(Op0))))
593 return Constant::getAllOnesValue(Ty);
594
595 // add nsw/nuw (xor Y, signmask), signmask --> Y
596 // The no-wrapping add guarantees that the top bit will be set by the add.
597 // Therefore, the xor must be clearing the already set sign bit of Y.
598 if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
599 match(Op0, m_Xor(m_Value(Y), m_SignMask())))
600 return Y;
601
602 // add nuw %x, -1 -> -1, because %x can only be 0.
603 if (IsNUW && match(Op1, m_AllOnes()))
604 return Op1; // Which is -1.
605
606 /// i1 add -> xor.
607 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
608 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
609 return V;
610
611 // Try some generic simplifications for associative operations.
612 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
613 MaxRecurse))
614 return V;
615
616 // Threading Add over selects and phi nodes is pointless, so don't bother.
617 // Threading over the select in "A + select(cond, B, C)" means evaluating
618 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
619 // only if B and C are equal. If B and C are equal then (since we assume
620 // that operands have already been simplified) "select(cond, B, C)" should
621 // have been simplified to the common value of B and C already. Analysing
622 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
623 // for threading over phi nodes.
624
625 return nullptr;
626}
627
628Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
629 const SimplifyQuery &Query) {
630 return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
631}
632
633/// Compute the base pointer and cumulative constant offsets for V.
634///
635/// This strips all constant offsets off of V, leaving it the base pointer, and
636/// accumulates the total constant offset applied in the returned constant. It
637/// returns 0 if V is not a pointer, and returns the constant '0' if there are
638/// no constant offsets applied.
639///
640/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
641/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
642/// folding.
643static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
644 bool AllowNonInbounds = false) {
645 assert(V->getType()->isPtrOrPtrVectorTy());
646
647 Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
648 APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
649
650 // Even though we don't look through PHI nodes, we could be called on an
651 // instruction in an unreachable block, which may be on a cycle.
652 SmallPtrSet<Value *, 4> Visited;
653 Visited.insert(V);
654 do {
655 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
656 if ((!AllowNonInbounds && !GEP->isInBounds()) ||
657 !GEP->accumulateConstantOffset(DL, Offset))
658 break;
659 V = GEP->getPointerOperand();
660 } else if (Operator::getOpcode(V) == Instruction::BitCast) {
661 V = cast<Operator>(V)->getOperand(0);
662 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
663 if (GA->isInterposable())
664 break;
665 V = GA->getAliasee();
666 } else {
667 if (auto CS = CallSite(V))
668 if (Value *RV = CS.getReturnedArgOperand()) {
669 V = RV;
670 continue;
671 }
672 break;
673 }
674 assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
675 } while (Visited.insert(V).second);
676
677 Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
678 if (V->getType()->isVectorTy())
679 return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
680 OffsetIntPtr);
681 return OffsetIntPtr;
682}
683
684/// Compute the constant difference between two pointer values.
685/// If the difference is not a constant, returns zero.
686static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
687 Value *RHS) {
688 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
689 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
690
691 // If LHS and RHS are not related via constant offsets to the same base
692 // value, there is nothing we can do here.
693 if (LHS != RHS)
694 return nullptr;
695
696 // Otherwise, the difference of LHS - RHS can be computed as:
697 // LHS - RHS
698 // = (LHSOffset + Base) - (RHSOffset + Base)
699 // = LHSOffset - RHSOffset
700 return ConstantExpr::getSub(LHSOffset, RHSOffset);
701}
702
703/// Given operands for a Sub, see if we can fold the result.
704/// If not, this returns null.
705static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
706 const SimplifyQuery &Q, unsigned MaxRecurse) {
707 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
708 return C;
709
710 // X - undef -> undef
711 // undef - X -> undef
712 if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
713 return UndefValue::get(Op0->getType());
714
715 // X - 0 -> X
716 if (match(Op1, m_Zero()))
717 return Op0;
718
719 // X - X -> 0
720 if (Op0 == Op1)
721 return Constant::getNullValue(Op0->getType());
722
723 // Is this a negation?
724 if (match(Op0, m_Zero())) {
725 // 0 - X -> 0 if the sub is NUW.
726 if (isNUW)
727 return Constant::getNullValue(Op0->getType());
728
729 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
730 if (Known.Zero.isMaxSignedValue()) {
731 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
732 // Op1 must be 0 because negating the minimum signed value is undefined.
733 if (isNSW)
734 return Constant::getNullValue(Op0->getType());
735
736 // 0 - X -> X if X is 0 or the minimum signed value.
737 return Op1;
738 }
739 }
740
741 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
742 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
743 Value *X = nullptr, *Y = nullptr, *Z = Op1;
744 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
745 // See if "V === Y - Z" simplifies.
746 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
747 // It does! Now see if "X + V" simplifies.
748 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
749 // It does, we successfully reassociated!
750 ++NumReassoc;
751 return W;
752 }
753 // See if "V === X - Z" simplifies.
754 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
755 // It does! Now see if "Y + V" simplifies.
756 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
757 // It does, we successfully reassociated!
758 ++NumReassoc;
759 return W;
760 }
761 }
762
763 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
764 // For example, X - (X + 1) -> -1
765 X = Op0;
766 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
767 // See if "V === X - Y" simplifies.
768 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
769 // It does! Now see if "V - Z" simplifies.
770 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
771 // It does, we successfully reassociated!
772 ++NumReassoc;
773 return W;
774 }
775 // See if "V === X - Z" simplifies.
776 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
777 // It does! Now see if "V - Y" simplifies.
778 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
779 // It does, we successfully reassociated!
780 ++NumReassoc;
781 return W;
782 }
783 }
784
785 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
786 // For example, X - (X - Y) -> Y.
787 Z = Op0;
788 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
789 // See if "V === Z - X" simplifies.
790 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
791 // It does! Now see if "V + Y" simplifies.
792 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
793 // It does, we successfully reassociated!
794 ++NumReassoc;
795 return W;
796 }
797
798 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
799 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
800 match(Op1, m_Trunc(m_Value(Y))))
801 if (X->getType() == Y->getType())
802 // See if "V === X - Y" simplifies.
803 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
804 // It does! Now see if "trunc V" simplifies.
805 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
806 Q, MaxRecurse - 1))
807 // It does, return the simplified "trunc V".
808 return W;
809
810 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
811 if (match(Op0, m_PtrToInt(m_Value(X))) &&
812 match(Op1, m_PtrToInt(m_Value(Y))))
813 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
814 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
815
816 // i1 sub -> xor.
817 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
818 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
819 return V;
820
821 // Threading Sub over selects and phi nodes is pointless, so don't bother.
822 // Threading over the select in "A - select(cond, B, C)" means evaluating
823 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
824 // only if B and C are equal. If B and C are equal then (since we assume
825 // that operands have already been simplified) "select(cond, B, C)" should
826 // have been simplified to the common value of B and C already. Analysing
827 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
828 // for threading over phi nodes.
829
830 return nullptr;
831}
832
833Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
834 const SimplifyQuery &Q) {
835 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
836}
837
838/// Given operands for a Mul, see if we can fold the result.
839/// If not, this returns null.
840static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
841 unsigned MaxRecurse) {
842 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
843 return C;
844
845 // X * undef -> 0
846 // X * 0 -> 0
847 if (match(Op1, m_CombineOr(m_Undef(), m_Zero())))
848 return Constant::getNullValue(Op0->getType());
849
850 // X * 1 -> X
851 if (match(Op1, m_One()))
852 return Op0;
853
854 // (X / Y) * Y -> X if the division is exact.
855 Value *X = nullptr;
856 if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
857 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
858 return X;
859
860 // i1 mul -> and.
861 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
862 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
863 return V;
864
865 // Try some generic simplifications for associative operations.
866 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
867 MaxRecurse))
868 return V;
869
870 // Mul distributes over Add. Try some generic simplifications based on this.
871 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
872 Q, MaxRecurse))
873 return V;
874
875 // If the operation is with the result of a select instruction, check whether
876 // operating on either branch of the select always yields the same value.
877 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
878 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
879 MaxRecurse))
880 return V;
881
882 // If the operation is with the result of a phi instruction, check whether
883 // operating on all incoming values of the phi always yields the same value.
884 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
885 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
886 MaxRecurse))
887 return V;
888
889 return nullptr;
890}
891
892Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
893 return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
894}
895
896/// Check for common or similar folds of integer division or integer remainder.
897/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
898static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
899 Type *Ty = Op0->getType();
900
901 // X / undef -> undef
902 // X % undef -> undef
903 if (match(Op1, m_Undef()))
904 return Op1;
905
906 // X / 0 -> undef
907 // X % 0 -> undef
908 // We don't need to preserve faults!
909 if (match(Op1, m_Zero()))
910 return UndefValue::get(Ty);
911
912 // If any element of a constant divisor vector is zero or undef, the whole op
913 // is undef.
914 auto *Op1C = dyn_cast<Constant>(Op1);
915 if (Op1C && Ty->isVectorTy()) {
916 unsigned NumElts = Ty->getVectorNumElements();
917 for (unsigned i = 0; i != NumElts; ++i) {
918 Constant *Elt = Op1C->getAggregateElement(i);
919 if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
920 return UndefValue::get(Ty);
921 }
922 }
923
924 // undef / X -> 0
925 // undef % X -> 0
926 if (match(Op0, m_Undef()))
927 return Constant::getNullValue(Ty);
928
929 // 0 / X -> 0
930 // 0 % X -> 0
931 if (match(Op0, m_Zero()))
932 return Constant::getNullValue(Op0->getType());
933
934 // X / X -> 1
935 // X % X -> 0
936 if (Op0 == Op1)
937 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
938
939 // X / 1 -> X
940 // X % 1 -> 0
941 // If this is a boolean op (single-bit element type), we can't have
942 // division-by-zero or remainder-by-zero, so assume the divisor is 1.
943 // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
944 Value *X;
945 if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
946 (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
947 return IsDiv ? Op0 : Constant::getNullValue(Ty);
948
949 return nullptr;
950}
951
952/// Given a predicate and two operands, return true if the comparison is true.
953/// This is a helper for div/rem simplification where we return some other value
954/// when we can prove a relationship between the operands.
955static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
956 const SimplifyQuery &Q, unsigned MaxRecurse) {
957 Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
958 Constant *C = dyn_cast_or_null<Constant>(V);
959 return (C && C->isAllOnesValue());
960}
961
962/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
963/// to simplify X % Y to X.
964static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
965 unsigned MaxRecurse, bool IsSigned) {
966 // Recursion is always used, so bail out at once if we already hit the limit.
967 if (!MaxRecurse--)
968 return false;
969
970 if (IsSigned) {
971 // |X| / |Y| --> 0
972 //
973 // We require that 1 operand is a simple constant. That could be extended to
974 // 2 variables if we computed the sign bit for each.
975 //
976 // Make sure that a constant is not the minimum signed value because taking
977 // the abs() of that is undefined.
978 Type *Ty = X->getType();
979 const APInt *C;
980 if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
981 // Is the variable divisor magnitude always greater than the constant
982 // dividend magnitude?
983 // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
984 Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
985 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
986 if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
987 isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
988 return true;
989 }
990 if (match(Y, m_APInt(C))) {
991 // Special-case: we can't take the abs() of a minimum signed value. If
992 // that's the divisor, then all we have to do is prove that the dividend
993 // is also not the minimum signed value.
994 if (C->isMinSignedValue())
995 return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
996
997 // Is the variable dividend magnitude always less than the constant
998 // divisor magnitude?
999 // |X| < |C| --> X > -abs(C) and X < abs(C)
1000 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1001 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1002 if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1003 isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1004 return true;
1005 }
1006 return false;
1007 }
1008
1009 // IsSigned == false.
1010 // Is the dividend unsigned less than the divisor?
1011 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1012}
1013
1014/// These are simplifications common to SDiv and UDiv.
1015static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1016 const SimplifyQuery &Q, unsigned MaxRecurse) {
1017 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1018 return C;
1019
1020 if (Value *V = simplifyDivRem(Op0, Op1, true))
1021 return V;
1022
1023 bool IsSigned = Opcode == Instruction::SDiv;
1024
1025 // (X * Y) / Y -> X if the multiplication does not overflow.
1026 Value *X;
1027 if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
1028 auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1029 // If the Mul does not overflow, then we are good to go.
1030 if ((IsSigned && Mul->hasNoSignedWrap()) ||
1031 (!IsSigned && Mul->hasNoUnsignedWrap()))
1032 return X;
1033 // If X has the form X = A / Y, then X * Y cannot overflow.
1034 if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
1035 (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
1036 return X;
1037 }
1038
1039 // (X rem Y) / Y -> 0
1040 if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1041 (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1042 return Constant::getNullValue(Op0->getType());
1043
1044 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1045 ConstantInt *C1, *C2;
1046 if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
1047 match(Op1, m_ConstantInt(C2))) {
1048 bool Overflow;
1049 (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1050 if (Overflow)
1051 return Constant::getNullValue(Op0->getType());
1052 }
1053
1054 // If the operation is with the result of a select instruction, check whether
1055 // operating on either branch of the select always yields the same value.
1056 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1057 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1058 return V;
1059
1060 // If the operation is with the result of a phi instruction, check whether
1061 // operating on all incoming values of the phi always yields the same value.
1062 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1063 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1064 return V;
1065
1066 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1067 return Constant::getNullValue(Op0->getType());
1068
1069 return nullptr;
1070}
1071
1072/// These are simplifications common to SRem and URem.
1073static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1074 const SimplifyQuery &Q, unsigned MaxRecurse) {
1075 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1076 return C;
1077
1078 if (Value *V = simplifyDivRem(Op0, Op1, false))
1079 return V;
1080
1081 // (X % Y) % Y -> X % Y
1082 if ((Opcode == Instruction::SRem &&
1083 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1084 (Opcode == Instruction::URem &&
1085 match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1086 return Op0;
1087
1088 // (X << Y) % X -> 0
1089 if ((Opcode == Instruction::SRem &&
1090 match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1091 (Opcode == Instruction::URem &&
1092 match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
1093 return Constant::getNullValue(Op0->getType());
1094
1095 // If the operation is with the result of a select instruction, check whether
1096 // operating on either branch of the select always yields the same value.
1097 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1098 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1099 return V;
1100
1101 // If the operation is with the result of a phi instruction, check whether
1102 // operating on all incoming values of the phi always yields the same value.
1103 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1104 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1105 return V;
1106
1107 // If X / Y == 0, then X % Y == X.
1108 if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
1109 return Op0;
1110
1111 return nullptr;
1112}
1113
1114/// Given operands for an SDiv, see if we can fold the result.
1115/// If not, this returns null.
1116static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1117 unsigned MaxRecurse) {
1118 // If two operands are negated and no signed overflow, return -1.
1119 if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1120 return Constant::getAllOnesValue(Op0->getType());
1121
1122 return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
1123}
1124
1125Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1126 return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
1127}
1128
1129/// Given operands for a UDiv, see if we can fold the result.
1130/// If not, this returns null.
1131static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1132 unsigned MaxRecurse) {
1133 return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
1134}
1135
1136Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1137 return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
1138}
1139
1140/// Given operands for an SRem, see if we can fold the result.
1141/// If not, this returns null.
1142static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1143 unsigned MaxRecurse) {
1144 // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1145 // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1146 Value *X;
1147 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1148 return ConstantInt::getNullValue(Op0->getType());
1149
1150 // If the two operands are negated, return 0.
1151 if (isKnownNegation(Op0, Op1))
1152 return ConstantInt::getNullValue(Op0->getType());
1153
1154 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1155}
1156
1157Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1158 return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
1159}
1160
1161/// Given operands for a URem, see if we can fold the result.
1162/// If not, this returns null.
1163static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1164 unsigned MaxRecurse) {
1165 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1166}
1167
1168Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1169 return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
1170}
1171
1172/// Returns true if a shift by \c Amount always yields undef.
1173static bool isUndefShift(Value *Amount) {
1174 Constant *C = dyn_cast<Constant>(Amount);
1175 if (!C)
1176 return false;
1177
1178 // X shift by undef -> undef because it may shift by the bitwidth.
1179 if (isa<UndefValue>(C))
1180 return true;
1181
1182 // Shifting by the bitwidth or more is undefined.
1183 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1184 if (CI->getValue().getLimitedValue() >=
1185 CI->getType()->getScalarSizeInBits())
1186 return true;
1187
1188 // If all lanes of a vector shift are undefined the whole shift is.
1189 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1190 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
1191 if (!isUndefShift(C->getAggregateElement(I)))
1192 return false;
1193 return true;
1194 }
1195
1196 return false;
1197}
1198
1199/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1200/// If not, this returns null.
1201static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1202 Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse) {
1203 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1204 return C;
1205
1206 // 0 shift by X -> 0
1207 if (match(Op0, m_Zero()))
1208 return Constant::getNullValue(Op0->getType());
1209
1210 // X shift by 0 -> X
1211 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1212 // would be poison.
1213 Value *X;
1214 if (match(Op1, m_Zero()) ||
1215 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1216 return Op0;
1217
1218 // Fold undefined shifts.
1219 if (isUndefShift(Op1))
1220 return UndefValue::get(Op0->getType());
1221
1222 // If the operation is with the result of a select instruction, check whether
1223 // operating on either branch of the select always yields the same value.
1224 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1225 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1226 return V;
1227
1228 // If the operation is with the result of a phi instruction, check whether
1229 // operating on all incoming values of the phi always yields the same value.
1230 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1231 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1232 return V;
1233
1234 // If any bits in the shift amount make that value greater than or equal to
1235 // the number of bits in the type, the shift is undefined.
1236 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1237 if (Known.One.getLimitedValue() >= Known.getBitWidth())
1238 return UndefValue::get(Op0->getType());
1239
1240 // If all valid bits in the shift amount are known zero, the first operand is
1241 // unchanged.
1242 unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
1243 if (Known.countMinTrailingZeros() >= NumValidShiftBits)
1244 return Op0;
1245
1246 return nullptr;
1247}
1248
1249/// Given operands for an Shl, LShr or AShr, see if we can
1250/// fold the result. If not, this returns null.
1251static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1252 Value *Op1, bool isExact, const SimplifyQuery &Q,
1253 unsigned MaxRecurse) {
1254 if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
1255 return V;
1256
1257 // X >> X -> 0
1258 if (Op0 == Op1)
1259 return Constant::getNullValue(Op0->getType());
1260
1261 // undef >> X -> 0
1262 // undef >> X -> undef (if it's exact)
1263 if (match(Op0, m_Undef()))
1264 return isExact ? Op0 : Constant::getNullValue(Op0->getType());
1265
1266 // The low bit cannot be shifted out of an exact shift if it is set.
1267 if (isExact) {
1268 KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1269 if (Op0Known.One[0])
1270 return Op0;
1271 }
1272
1273 return nullptr;
1274}
1275
1276/// Given operands for an Shl, see if we can fold the result.
1277/// If not, this returns null.
1278static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1279 const SimplifyQuery &Q, unsigned MaxRecurse) {
1280 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
1281 return V;
1282
1283 // undef << X -> 0
1284 // undef << X -> undef (if it's NSW/NUW)
1285 if (match(Op0, m_Undef()))
1286 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1287
1288 // (X >> A) << A -> X
1289 Value *X;
1290 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1291 return X;
1292
1293 // shl nuw i8 C, %x -> C iff C has sign bit set.
1294 if (isNUW && match(Op0, m_Negative()))
1295 return Op0;
1296 // NOTE: could use computeKnownBits() / LazyValueInfo,
1297 // but the cost-benefit analysis suggests it isn't worth it.
1298
1299 return nullptr;
1300}
1301
1302Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1303 const SimplifyQuery &Q) {
1304 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1305}
1306
1307/// Given operands for an LShr, see if we can fold the result.
1308/// If not, this returns null.
1309static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1310 const SimplifyQuery &Q, unsigned MaxRecurse) {
1311 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1312 MaxRecurse))
1313 return V;
1314
1315 // (X << A) >> A -> X
1316 Value *X;
1317 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1318 return X;
1319
1320 return nullptr;
1321}
1322
1323Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1324 const SimplifyQuery &Q) {
1325 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1326}
1327
1328/// Given operands for an AShr, see if we can fold the result.
1329/// If not, this returns null.
1330static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1331 const SimplifyQuery &Q, unsigned MaxRecurse) {
1332 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1333 MaxRecurse))
1334 return V;
1335
1336 // all ones >>a X -> -1
1337 // Do not return Op0 because it may contain undef elements if it's a vector.
1338 if (match(Op0, m_AllOnes()))
1339 return Constant::getAllOnesValue(Op0->getType());
1340
1341 // (X << A) >> A -> X
1342 Value *X;
1343 if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1344 return X;
1345
1346 // Arithmetic shifting an all-sign-bit value is a no-op.
1347 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1348 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1349 return Op0;
1350
1351 return nullptr;
1352}
1353
1354Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1355 const SimplifyQuery &Q) {
1356 return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1357}
1358
1359/// Commuted variants are assumed to be handled by calling this function again
1360/// with the parameters swapped.
1361static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1362 ICmpInst *UnsignedICmp, bool IsAnd) {
1363 Value *X, *Y;
1364
1365 ICmpInst::Predicate EqPred;
1366 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1367 !ICmpInst::isEquality(EqPred))
1368 return nullptr;
1369
1370 ICmpInst::Predicate UnsignedPred;
1371 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1372 ICmpInst::isUnsigned(UnsignedPred))
1373 ;
1374 else if (match(UnsignedICmp,
1375 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1376 ICmpInst::isUnsigned(UnsignedPred))
1377 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1378 else
1379 return nullptr;
1380
1381 // X < Y && Y != 0 --> X < Y
1382 // X < Y || Y != 0 --> Y != 0
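// X <u Y already implies Y != 0, so the zero check adds nothing to the 'and'
// and the 'or' collapses to the weaker condition Y != 0.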
1383 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1384 return IsAnd ? UnsignedICmp : ZeroICmp;
1385
1386 // X >= Y || Y != 0 --> true
1387 // X >= Y || Y == 0 --> X >= Y
1388 if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
1389 if (EqPred == ICmpInst::ICMP_NE)
1390 return getTrue(UnsignedICmp->getType());
1391 return UnsignedICmp;
1392 }
1393
1394 // X < Y && Y == 0 --> false
1395 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1396 IsAnd)
1397 return getFalse(UnsignedICmp->getType());
1398
1399 return nullptr;
1400}
1401
1402/// Commuted variants are assumed to be handled by calling this function again
1403/// with the parameters swapped.
1404static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1405 ICmpInst::Predicate Pred0, Pred1;
1406 Value *A, *B;
1407 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1408 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1409 return nullptr;
1410
1411 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1412 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1413 // can eliminate Op1 from this 'and'.
1414 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1415 return Op0;
1416
1417 // Check for any combination of predicates that are guaranteed to be disjoint.
1418 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1419 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1420 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1421 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1422 return getFalse(Op0->getType());
1423
1424 return nullptr;
1425}
1426
1427/// Commuted variants are assumed to be handled by calling this function again
1428/// with the parameters swapped.
1429static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1430 ICmpInst::Predicate Pred0, Pred1;
1431 Value *A, *B;
1432 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1433 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1434 return nullptr;
1435
1436 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1437 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1438 // can eliminate Op0 from this 'or'.
1439 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1440 return Op1;
1441
1442 // Check for any combination of predicates that cover the entire range of
1443 // possibilities.
1444 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1445 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1446 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1447 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1448 return getTrue(Op0->getType());
1449
1450 return nullptr;
1451}
1452
1453/// Test if a pair of compares with a shared operand and 2 constants has an
1454/// empty set intersection, full set union, or if one compare is a superset of
1455/// the other.
1456static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1457 bool IsAnd) {
1458 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1459 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1460 return nullptr;
1461
1462 const APInt *C0, *C1;
1463 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1464 !match(Cmp1->getOperand(1), m_APInt(C1)))
1465 return nullptr;
1466
1467 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1468 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
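// makeExactICmpRegion gives the exact set of values satisfying the compare,
// e.g. "icmp ult %x, 8" corresponds to the unsigned range [0, 8).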
1469
1470 // For and-of-compares, check if the intersection is empty:
1471 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1472 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1473 return getFalse(Cmp0->getType());
1474
1475 // For or-of-compares, check if the union is full:
1476 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1477 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1478 return getTrue(Cmp0->getType());
1479
1480 // Is one range a superset of the other?
1481 // If this is and-of-compares, take the smaller set:
1482 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1483 // If this is or-of-compares, take the larger set:
1484 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1485 if (Range0.contains(Range1))
1486 return IsAnd ? Cmp1 : Cmp0;
1487 if (Range1.contains(Range0))
1488 return IsAnd ? Cmp0 : Cmp1;
1489
1490 return nullptr;
1491}
1492
1493static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1494 bool IsAnd) {
1495 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1496 if (!match(Cmp0->getOperand(1), m_Zero()) ||
1497 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1498 return nullptr;
1499
1500 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1501 return nullptr;
1502
1503 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1504 Value *X = Cmp0->getOperand(0);
1505 Value *Y = Cmp1->getOperand(0);
1506
1507 // If one of the compares is a masked version of a (not) null check, then
1508 // that compare implies the other, so we eliminate the other. Optionally, look
1509 // through a pointer-to-int cast to match a null check of a pointer type.
1510
1511 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1512 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1513 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1514 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
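// In the '&&' form the masked compare implies the plain one, and in the '||'
// form the plain compare implies the masked one; either way only the masked
// compare needs to survive.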
1515 if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1516 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1517 return Cmp1;
1518
1519 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1520 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1521 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1522 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1523 if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1524 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1525 return Cmp0;
1526
1527 return nullptr;
1528}
1529
1530static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
1531 // (icmp (add V, C0), C1) & (icmp V, C0)
1532 ICmpInst::Predicate Pred0, Pred1;
1533 const APInt *C0, *C1;
1534 Value *V;
1535 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1536 return nullptr;
1537
1538 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1539 return nullptr;
1540
1541 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1542 if (AddInst->getOperand(1) != Op1->getOperand(1))
1543 return nullptr;
1544
1545 Type *ITy = Op0->getType();
1546 bool isNSW = AddInst->hasNoSignedWrap();
1547 bool isNUW = AddInst->hasNoUnsignedWrap();
1548
1549 const APInt Delta = *C1 - *C0;
1550 if (C0->isStrictlyPositive()) {
1551 if (Delta == 2) {
1552 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1553 return getFalse(ITy);
1554 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1555 return getFalse(ITy);
1556 }
1557 if (Delta == 1) {
1558 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1559 return getFalse(ITy);
1560 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1561 return getFalse(ITy);
1562 }
1563 }
1564 if (C0->getBoolValue() && isNUW) {
1565 if (Delta == 2)
1566 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1567 return getFalse(ITy);
1568 if (Delta == 1)
1569 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1570 return getFalse(ITy);
1571 }
1572
1573 return nullptr;
1574}
1575
1576static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1577 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
1578 return X;
1579 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
1580 return X;
1581
1582 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1583 return X;
1584 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
1585 return X;
1586
1587 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1588 return X;
1589
1590 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1591 return X;
1592
1593 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1))
1594 return X;
1595 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0))
1596 return X;
1597
1598 return nullptr;
1599}
1600
1601static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
1602 // (icmp (add V, C0), C1) | (icmp V, C0)
1603 ICmpInst::Predicate Pred0, Pred1;
1604 const APInt *C0, *C1;
1605 Value *V;
1606 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1607 return nullptr;
1608
1609 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1610 return nullptr;
1611
1612 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1613 if (AddInst->getOperand(1) != Op1->getOperand(1))
1614 return nullptr;
1615
1616 Type *ITy = Op0->getType();
1617 bool isNSW = AddInst->hasNoSignedWrap();
1618 bool isNUW = AddInst->hasNoUnsignedWrap();
1619
1620 const APInt Delta = *C1 - *C0;
1621 if (C0->isStrictlyPositive()) {
1622 if (Delta == 2) {
1623 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1624 return getTrue(ITy);
1625 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1626 return getTrue(ITy);
1627 }
1628 if (Delta == 1) {
1629 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1630 return getTrue(ITy);
1631 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1632 return getTrue(ITy);
1633 }
1634 }
1635 if (C0->getBoolValue() && isNUW) {
1636 if (Delta == 2)
1637 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1638 return getTrue(ITy);
1639 if (Delta == 1)
1640 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1641 return getTrue(ITy);
1642 }
1643
1644 return nullptr;
1645}
1646
1647static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1648 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
1649 return X;
1650 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
1651 return X;
1652
1653 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1654 return X;
1655 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
1656 return X;
1657
1658 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1659 return X;
1660
1661 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1662 return X;
1663
1664 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1))
1665 return X;
1666 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0))
1667 return X;
1668
1669 return nullptr;
1670}
1671
1672static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1673 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1674 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1675 if (LHS0->getType() != RHS0->getType())
1676 return nullptr;
1677
1678 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1679 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1680 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1681 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1682 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1683 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1684 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1685 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1686 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1687 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1688 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
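// "fcmp ord A, B" holds iff neither operand is NaN (and "uno" iff either is),
// so when one operand of LHS is known never-NaN, LHS only really tests its
// other operand, which RHS already covers.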
1689 if ((isKnownNeverNaN(LHS0) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1690 (isKnownNeverNaN(LHS1) && (LHS0 == RHS0 || LHS0 == RHS1)))
1691 return RHS;
1692
1693 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1694 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1695 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1696 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1697 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1698 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1699 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1700 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1701 if ((isKnownNeverNaN(RHS0) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1702 (isKnownNeverNaN(RHS1) && (RHS0 == LHS0 || RHS0 == LHS1)))
1703 return LHS;
1704 }
1705
1706 return nullptr;
1707}
1708
1709static Value *simplifyAndOrOfCmps(Value *Op0, Value *Op1, bool IsAnd) {
1710 // Look through casts of the 'and' operands to find compares.
1711 auto *Cast0 = dyn_cast<CastInst>(Op0);
1712 auto *Cast1 = dyn_cast<CastInst>(Op1);
1713 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1714 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1715 Op0 = Cast0->getOperand(0);
1716 Op1 = Cast1->getOperand(0);
1717 }
1718
1719 Value *V = nullptr;
1720 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1721 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1722 if (ICmp0 && ICmp1)
1723 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1) :
1724 simplifyOrOfICmps(ICmp0, ICmp1);
1725
1726 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1727 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1728 if (FCmp0 && FCmp1)
1729 V = simplifyAndOrOfFCmps(FCmp0, FCmp1, IsAnd);
1730
1731 if (!V)
1732 return nullptr;
1733 if (!Cast0)
1734 return V;
1735
1736 // If we looked through casts, we can only handle a constant simplification
1737 // because we are not allowed to create a cast instruction here.
1738 if (auto *C = dyn_cast<Constant>(V))
1739 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1740
1741 return nullptr;
1742}
1743
1744/// Given operands for an And, see if we can fold the result.
1745/// If not, this returns null.
1746static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1747 unsigned MaxRecurse) {
1748 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1749 return C;
1750
1751 // X & undef -> 0
1752 if (match(Op1, m_Undef()))
1753 return Constant::getNullValue(Op0->getType());
1754
1755 // X & X = X
1756 if (Op0 == Op1)
1757 return Op0;
1758
1759 // X & 0 = 0
1760 if (match(Op1, m_Zero()))
1761 return Constant::getNullValue(Op0->getType());
1762
1763 // X & -1 = X
1764 if (match(Op1, m_AllOnes()))
1765 return Op0;
1766
1767 // A & ~A = ~A & A = 0
1768 if (match(Op0, m_Not(m_Specific(Op1))) ||
1769 match(Op1, m_Not(m_Specific(Op0))))
1770 return Constant::getNullValue(Op0->getType());
1771
1772 // (A | ?) & A = A
1773 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
1774 return Op1;
1775
1776 // A & (A | ?) = A
1777 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
1778 return Op0;
1779
1780 // A mask that only clears known zeros of a shifted value is a no-op.
1781 Value *X;
1782 const APInt *Mask;
1783 const APInt *ShAmt;
1784 if (match(Op1, m_APInt(Mask))) {
1785 // If all bits in the inverted and shifted mask are clear:
1786 // and (shl X, ShAmt), Mask --> shl X, ShAmt
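// Example: "and (shl i32 %x, 8), 0xFFFFFF00" -- the shift already cleared the
// low 8 bits, so the mask clears nothing new and the 'and' is redundant.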
1787 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
1788 (~(*Mask)).lshr(*ShAmt).isNullValue())
1789 return Op0;
1790
1791 // If all bits in the inverted and shifted mask are clear:
1792 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
1793 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
1794 (~(*Mask)).shl(*ShAmt).isNullValue())
1795 return Op0;
1796 }
1797
1798 // A & (-A) = A if A is a power of two or zero.
1799 if (match(Op0, m_Neg(m_Specific(Op1))) ||
1800 match(Op1, m_Neg(m_Specific(Op0)))) {
1801 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1802 Q.DT))
1803 return Op0;
1804 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
1805 Q.DT))
1806 return Op1;
1807 }
1808
1809 if (Value *V = simplifyAndOrOfCmps(Op0, Op1, true))
1810 return V;
1811
1812 // Try some generic simplifications for associative operations.
1813 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
1814 MaxRecurse))
1815 return V;
1816
1817 // And distributes over Or. Try some generic simplifications based on this.
1818 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
1819 Q, MaxRecurse))
1820 return V;
1821
1822 // And distributes over Xor. Try some generic simplifications based on this.
1823 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
1824 Q, MaxRecurse))
1825 return V;
1826
1827 // If the operation is with the result of a select instruction, check whether
1828 // operating on either branch of the select always yields the same value.
1829 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1830 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
1831 MaxRecurse))
1832 return V;
1833
1834 // If the operation is with the result of a phi instruction, check whether
1835 // operating on all incoming values of the phi always yields the same value.
1836 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1837 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
1838 MaxRecurse))
1839 return V;
1840
1841 return nullptr;
1842}
1843
1844Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1845 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
1846}
1847
1848/// Given operands for an Or, see if we can fold the result.
1849/// If not, this returns null.
1850static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1851 unsigned MaxRecurse) {
1852 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
1853 return C;
1854
1855 // X | undef -> -1
1856 // X | -1 = -1
1857 // Do not return Op1 because it may contain undef elements if it's a vector.
1858 if (match(Op1, m_Undef()) || match(Op1, m_AllOnes()))
1859 return Constant::getAllOnesValue(Op0->getType());
1860
1861 // X | X = X
1862 // X | 0 = X
1863 if (Op0 == Op1 || match(Op1, m_Zero()))
1864 return Op0;
1865
1866 // A | ~A = ~A | A = -1
1867 if (match(Op0, m_Not(m_Specific(Op1))) ||
1868 match(Op1, m_Not(m_Specific(Op0))))
1869 return Constant::getAllOnesValue(Op0->getType());
1870
1871 // (A & ?) | A = A
1872 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
1873 return Op1;
1874
1875 // A | (A & ?) = A
1876 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
1877 return Op0;
1878
1879 // ~(A & ?) | A = -1
1880 if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
1881 return Constant::getAllOnesValue(Op1->getType());
1882
1883 // A | ~(A & ?) = -1
1884 if (match(Op1, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
1885 return Constant::getAllOnesValue(Op0->getType());
1886
1887 Value *A, *B;
1888 // (A & ~B) | (A ^ B) -> (A ^ B)
1889 // (~B & A) | (A ^ B) -> (A ^ B)
1890 // (A & ~B) | (B ^ A) -> (B ^ A)
1891 // (~B & A) | (B ^ A) -> (B ^ A)
1892 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
1893 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
1894 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
1895 return Op1;
1896
1897 // Commute the 'or' operands.
1898 // (A ^ B) | (A & ~B) -> (A ^ B)
1899 // (A ^ B) | (~B & A) -> (A ^ B)
1900 // (B ^ A) | (A & ~B) -> (B ^ A)
1901 // (B ^ A) | (~B & A) -> (B ^ A)
1902 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1903 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
1904 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
1905 return Op0;
1906
1907 // (A & B) | (~A ^ B) -> (~A ^ B)
1908 // (B & A) | (~A ^ B) -> (~A ^ B)
1909 // (A & B) | (B ^ ~A) -> (B ^ ~A)
1910 // (B & A) | (B ^ ~A) -> (B ^ ~A)
1911 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1912 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
1913 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
1914 return Op1;
1915
1916 // (~A ^ B) | (A & B) -> (~A ^ B)
1917 // (~A ^ B) | (B & A) -> (~A ^ B)
1918 // (B ^ ~A) | (A & B) -> (B ^ ~A)
1919 // (B ^ ~A) | (B & A) -> (B ^ ~A)
1920 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
1921 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
1922 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
1923 return Op0;
1924
1925 if (Value *V = simplifyAndOrOfCmps(Op0, Op1, false))
1926 return V;
1927
1928 // Try some generic simplifications for associative operations.
1929 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
1930 MaxRecurse))
1931 return V;
1932
1933 // Or distributes over And. Try some generic simplifications based on this.
1934 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
1935 MaxRecurse))
1936 return V;
1937
1938 // If the operation is with the result of a select instruction, check whether
1939 // operating on either branch of the select always yields the same value.
1940 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1941 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
1942 MaxRecurse))
1943 return V;
1944
1945 // (A & C1)|(B & C2)
1946 const APInt *C1, *C2;
1947 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
1948 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
1949 if (*C1 == ~*C2) {
1950 // (A & C1)|(B & C2)
1951 // If we have: ((V + N) & C1) | (V & C2)
1952 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1953 // replace with V+N.
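// Example (i32, C1 = 0xFFFFFF00, C2 = 0x000000FF): N has no bits below C1, so
// V+N and V agree on the low byte and the 'or' reassembles V+N unchanged.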
1954 Value *N;
1955 if (C2->isMask() && // C2 == 0+1+
1956 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
1957 // Add commutes, try both ways.
1958 if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1959 return A;
1960 }
1961 // Or commutes, try both ways.
1962 if (C1->isMask() &&
1963 match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
1964 // Add commutes, try both ways.
1965 if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
1966 return B;
1967 }
1968 }
1969 }
1970
1971 // If the operation is with the result of a phi instruction, check whether
1972 // operating on all incoming values of the phi always yields the same value.
1973 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1974 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
1975 return V;
1976
1977 return nullptr;
1978}
1979
1980Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1981 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
1982}
1983
1984/// Given operands for a Xor, see if we can fold the result.
1985/// If not, this returns null.
1986static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1987 unsigned MaxRecurse) {
1988 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
1989 return C;
1990
1991 // A ^ undef -> undef
1992 if (match(Op1, m_Undef()))
1993 return Op1;
1994
1995 // A ^ 0 = A
1996 if (match(Op1, m_Zero()))
1997 return Op0;
1998
1999 // A ^ A = 0
2000 if (Op0 == Op1)
2001 return Constant::getNullValue(Op0->getType());
2002
2003 // A ^ ~A = ~A ^ A = -1
2004 if (match(Op0, m_Not(m_Specific(Op1))) ||
2005 match(Op1, m_Not(m_Specific(Op0))))
2006 return Constant::getAllOnesValue(Op0->getType());
2007
2008 // Try some generic simplifications for associative operations.
2009 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2010 MaxRecurse))
2011 return V;
2012
2013 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2014 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2015 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2016 // only if B and C are equal. If B and C are equal then (since we assume
2017 // that operands have already been simplified) "select(cond, B, C)" should
2018 // have been simplified to the common value of B and C already. Analysing
2019 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2020 // for threading over phi nodes.
2021
2022 return nullptr;
2023}
2024
2025Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2026 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2027}
2028
2029
2030static Type *GetCompareTy(Value *Op) {
2031 return CmpInst::makeCmpResultType(Op->getType());
2032}
2033
2034/// Rummage around inside V looking for something equivalent to the comparison
2035/// "LHS Pred RHS". Return such a value if found, otherwise return null.
2036/// Helper function for analyzing max/min idioms.
2037static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2038 Value *LHS, Value *RHS) {
2039 SelectInst *SI = dyn_cast<SelectInst>(V);
2040 if (!SI)
2041 return nullptr;
2042 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2043 if (!Cmp)
2044 return nullptr;
2045 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2046 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2047 return Cmp;
2048 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2049 LHS == CmpRHS && RHS == CmpLHS)
2050 return Cmp;
2051 return nullptr;
2052}
2053
2054// A significant optimization not implemented here is assuming that alloca
2055// addresses are not equal to incoming argument values. They don't *alias*,
2056// as we say, but that doesn't mean they aren't equal, so we take a
2057// conservative approach.
2058//
2059// This is inspired in part by C++11 5.10p1:
2060// "Two pointers of the same type compare equal if and only if they are both
2061// null, both point to the same function, or both represent the same
2062// address."
2063//
2064// This is pretty permissive.
2065//
2066// It's also partly due to C11 6.5.9p6:
2067// "Two pointers compare equal if and only if both are null pointers, both are
2068// pointers to the same object (including a pointer to an object and a
2069// subobject at its beginning) or function, both are pointers to one past the
2070// last element of the same array object, or one is a pointer to one past the
2071// end of one array object and the other is a pointer to the start of a
2072// different array object that happens to immediately follow the first array
2073 // object in the address space."
2074//
2075 // C11's version is more restrictive; however, there's no reason why an argument
2076// couldn't be a one-past-the-end value for a stack object in the caller and be
2077// equal to the beginning of a stack object in the callee.
2078//
2079// If the C and C++ standards are ever made sufficiently restrictive in this
2080// area, it may be possible to update LLVM's semantics accordingly and reinstate
2081// this optimization.
2082static Constant *
2083computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2084 const DominatorTree *DT, CmpInst::Predicate Pred,
2085 AssumptionCache *AC, const Instruction *CxtI,
2086 Value *LHS, Value *RHS) {
2087 // First, skip past any trivial no-ops.
2088 LHS = LHS->stripPointerCasts();
2089 RHS = RHS->stripPointerCasts();
2090
2091 // A non-null pointer is not equal to a null pointer.
2092 if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
2093 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2094 return ConstantInt::get(GetCompareTy(LHS),
2095 !CmpInst::isTrueWhenEqual(Pred));
2096
2097 // We can only fold certain predicates on pointer comparisons.
2098 switch (Pred) {
2099 default:
2100 return nullptr;
2101
2102 // Equality comparisons are easy to fold.
2103 case CmpInst::ICMP_EQ:
2104 case CmpInst::ICMP_NE:
2105 break;
2106
2107 // We can only handle unsigned relational comparisons because 'inbounds' on
2108 // a GEP only protects against unsigned wrapping.
2109 case CmpInst::ICMP_UGT:
2110 case CmpInst::ICMP_UGE:
2111 case CmpInst::ICMP_ULT:
2112 case CmpInst::ICMP_ULE:
2113 // However, we have to switch them to their signed variants to handle
2114 // negative indices from the base pointer.
2115 Pred = ICmpInst::getSignedPredicate(Pred);
2116 break;
2117 }
2118
2119 // Strip off any constant offsets so that we can reason about them.
2120 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2121 // here and compare base addresses like AliasAnalysis does, however there are
2122 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2123 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2124 // doesn't need to guarantee pointer inequality when it says NoAlias.
2125 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2126 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2127
2128 // If LHS and RHS are related via constant offsets to the same base
2129 // value, we can replace it with an icmp which just compares the offsets.
2130 if (LHS == RHS)
2131 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2132
2133 // Various optimizations for (in)equality comparisons.
2134 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2135 // Different non-empty allocations that exist at the same time have
2136 // different addresses (if the program can tell). Global variables always
2137 // exist, so they always exist during the lifetime of each other and all
2138 // allocas. Two different allocas usually have different addresses...
2139 //
2140 // However, if there's an @llvm.stackrestore dynamically in between two
2141 // allocas, they may have the same address. It's tempting to reduce the
2142 // scope of the problem by only looking at *static* allocas here. That would
2143 // cover the majority of allocas while significantly reducing the likelihood
2144 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2145 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2146 // an entry block. Also, if we have a block that's not attached to a
2147 // function, we can't tell if it's "static" under the current definition.
2148 // Theoretically, this problem could be fixed by creating a new kind of
2149 // instruction specifically for static allocas. Such a new instruction
2150 // could be required to be at the top of the entry block, thus preventing it
2151 // from being subject to a @llvm.stackrestore. Instcombine could even
2152 // convert regular allocas into these special allocas. It'd be nifty.
2153 // However, until then, this problem remains open.
2154 //
2155 // So, we'll assume that two non-empty allocas have different addresses
2156 // for now.
2157 //
2158 // With all that, if the offsets are within the bounds of their allocations
2159 // (and not one-past-the-end! so we can't use inbounds!), and their
2160 // allocations aren't the same, the pointers are not equal.
2161 //
2162 // Note that it's not necessary to check for LHS being a global variable
2163 // address, due to canonicalization and constant folding.
2164 if (isa<AllocaInst>(LHS) &&
2165 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2166 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2167 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2168 uint64_t LHSSize, RHSSize;
2169 ObjectSizeOpts Opts;
2170 Opts.NullIsUnknownSize =
2171 NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2172 if (LHSOffsetCI && RHSOffsetCI &&
2173 getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2174 getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2175 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2176 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2177 if (!LHSOffsetValue.isNegative() &&
2178 !RHSOffsetValue.isNegative() &&
2179 LHSOffsetValue.ult(LHSSize) &&
2180 RHSOffsetValue.ult(RHSSize)) {
2181 return ConstantInt::get(GetCompareTy(LHS),
2182 !CmpInst::isTrueWhenEqual(Pred));
2183 }
2184 }
2185
2186 // Repeat the above check but this time without depending on DataLayout
2187 // or being able to compute a precise size.
2188 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2189 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2190 LHSOffset->isNullValue() &&
2191 RHSOffset->isNullValue())
2192 return ConstantInt::get(GetCompareTy(LHS),
2193 !CmpInst::isTrueWhenEqual(Pred));
2194 }
2195
2196 // Even if a non-inbounds GEP occurs along the path, we can still optimize
2197 // equality comparisons concerning the result. We avoid walking the whole
2198 // chain again by starting where the last calls to
2199 // stripAndComputeConstantOffsets left off and accumulate the offsets.
2200 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2201 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2202 if (LHS == RHS)
2203 return ConstantExpr::getICmp(Pred,
2204 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2205 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2206
2207 // If one side of the equality comparison must come from a noalias call
2208 // (meaning a system memory allocation function), and the other side must
2209 // come from a pointer that cannot overlap with dynamically-allocated
2210 // memory within the lifetime of the current function (allocas, byval
2211 // arguments, globals), then determine the comparison result here.
2212 SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2213 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2214 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2215
2216 // Is the set of underlying objects all noalias calls?
2217 auto IsNAC = [](ArrayRef<Value *> Objects) {
2218 return all_of(Objects, isNoAliasCall);
2219 };
2220
2221 // Is the set of underlying objects all things which must be disjoint from
2222 // noalias calls. For allocas, we consider only static ones (dynamic
2223 // allocas might be transformed into calls to malloc not simultaneously
2224 // live with the compared-to allocation). For globals, we exclude symbols
2225 // that might be resolved lazily to symbols in another dynamically-loaded
2226 // library (and, thus, could be malloc'ed by the implementation).
2227 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
2228 return all_of(Objects, [](Value *V) {
2229 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2230 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2231 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2232 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2233 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2234 !GV->isThreadLocal();
2235 if (const Argument *A = dyn_cast<Argument>(V))
2236 return A->hasByValAttr();
2237 return false;
2238 });
2239 };
2240
2241 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2242 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2243 return ConstantInt::get(GetCompareTy(LHS),
2244 !CmpInst::isTrueWhenEqual(Pred));
2245
2246 // Fold comparisons for non-escaping pointer even if the allocation call
2247 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2248 // dynamic allocation call could be either of the operands.
2249 Value *MI = nullptr;
2250 if (isAllocLikeFn(LHS, TLI) &&
2251 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2252 MI = LHS;
2253 else if (isAllocLikeFn(RHS, TLI) &&
2254 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2255 MI = RHS;
2256 // FIXME: We should also fold the compare when the pointer escapes, but the
2257 // compare dominates the pointer escape.
2258 if (MI && !PointerMayBeCaptured(MI, true, true))
2259 return ConstantInt::get(GetCompareTy(LHS),
2260 CmpInst::isFalseWhenEqual(Pred));
2261 }
2262
2263 // Otherwise, fail.
2264 return nullptr;
2265}
2266
2267/// Fold an icmp when its operands have i1 scalar type.
2268static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2269 Value *RHS, const SimplifyQuery &Q) {
2270 Type *ITy = GetCompareTy(LHS); // The return type.
2271 Type *OpTy = LHS->getType(); // The operand type.
2272 if (!OpTy->isIntOrIntVectorTy(1))
2273 return nullptr;
2274
2275 // A boolean compared to true/false can be simplified in 14 out of the 20
2276 // (10 predicates * 2 constants) possible combinations. Cases not handled here
2277 // require a 'not' of the LHS, so those must be transformed in InstCombine.
2278 if (match(RHS, m_Zero())) {
2279 switch (Pred) {
2280 case CmpInst::ICMP_NE: // X != 0 -> X
2281 case CmpInst::ICMP_UGT: // X >u 0 -> X
2282 case CmpInst::ICMP_SLT: // X <s 0 -> X
2283 return LHS;
2284
2285 case CmpInst::ICMP_ULT: // X <u 0 -> false
2286 case CmpInst::ICMP_SGT: // X >s 0 -> false
2287 return getFalse(ITy);
2288
2289 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2290 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2291 return getTrue(ITy);
2292
2293 default: break;
2294 }
2295 } else if (match(RHS, m_One())) {
2296 switch (Pred) {
2297 case CmpInst::ICMP_EQ: // X == 1 -> X
2298 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2299 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2300 return LHS;
2301
2302 case CmpInst::ICMP_UGT: // X >u 1 -> false
2303 case CmpInst::ICMP_SLT: // X <s -1 -> false
2304 return getFalse(ITy);
2305
2306 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2307 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2308 return getTrue(ITy);
2309
2310 default: break;
2311 }
2312 }
2313
2314 switch (Pred) {
2315 default:
2316 break;
2317 case ICmpInst::ICMP_UGE:
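// For i1 values, "LHS >=u RHS" fails only when LHS is 0 and RHS is 1, i.e.
// exactly when RHS does not imply LHS.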
2318 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2319 return getTrue(ITy);
2320 break;
2321 case ICmpInst::ICMP_SGE:
2322 /// For signed comparison, the values for an i1 are 0 and -1
2323 /// respectively. This maps into a truth table of:
2324 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2325 /// 0 | 0 | 1 (0 >= 0) | 1
2326 /// 0 | 1 | 1 (0 >= -1) | 1
2327 /// 1 | 0 | 0 (-1 >= 0) | 0
2328 /// 1 | 1 | 1 (-1 >= -1) | 1
2329 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2330 return getTrue(ITy);
2331 break;
2332 case ICmpInst::ICMP_ULE:
2333 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2334 return getTrue(ITy);
2335 break;
2336 }
2337
2338 return nullptr;
2339}
2340
2341/// Try hard to fold icmp with zero RHS because this is a common case.
2342static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2343 Value *RHS, const SimplifyQuery &Q) {
2344 if (!match(RHS, m_Zero()))
2345 return nullptr;
2346
2347 Type *ITy = GetCompareTy(LHS); // The return type.
2348 switch (Pred) {
2349 default:
2350 llvm_unreachable("Unknown ICmp predicate!");
2351 case ICmpInst::ICMP_ULT:
2352 return getFalse(ITy);
2353 case ICmpInst::ICMP_UGE:
2354 return getTrue(ITy);
2355 case ICmpInst::ICMP_EQ:
2356 case ICmpInst::ICMP_ULE:
2357 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2358 return getFalse(ITy);
2359 break;
2360 case ICmpInst::ICMP_NE:
2361 case ICmpInst::ICMP_UGT:
2362 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2363 return getTrue(ITy);
2364 break;
2365 case ICmpInst::ICMP_SLT: {
2366 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2367 if (LHSKnown.isNegative())
2368 return getTrue(ITy);
2369 if (LHSKnown.isNonNegative())
2370 return getFalse(ITy);
2371 break;
2372 }
2373 case ICmpInst::ICMP_SLE: {
2374 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2375 if (LHSKnown.isNegative())
2376 return getTrue(ITy);
2377 if (LHSKnown.isNonNegative() &&
2378 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2379 return getFalse(ITy);
2380 break;
2381 }
2382 case ICmpInst::ICMP_SGE: {
2383 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2384 if (LHSKnown.isNegative())
2385 return getFalse(ITy);
2386 if (LHSKnown.isNonNegative())
2387 return getTrue(ITy);
2388 break;
2389 }
2390 case ICmpInst::ICMP_SGT: {
2391 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2392 if (LHSKnown.isNegative())
2393 return getFalse(ITy);
2394 if (LHSKnown.isNonNegative() &&
2395 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2396 return getTrue(ITy);
2397 break;
2398 }
2399 }
2400
2401 return nullptr;
2402}
2403
2404/// Many binary operators with a constant operand have an easy-to-compute
2405/// range of outputs. This can be used to fold a comparison to always true or
2406/// always false.
2407static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) {
2408 unsigned Width = Lower.getBitWidth();
2409 const APInt *C;
2410 switch (BO.getOpcode()) {
2411 case Instruction::Add:
2412 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
2413 // FIXME: If we have both nuw and nsw, we should reduce the range further.
2414 if (BO.hasNoUnsignedWrap()) {
2415 // 'add nuw x, C' produces [C, UINT_MAX].
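// e.g. 'add nuw i8 %x, 16' can only produce values in [16, 255].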
2416 Lower = *C;
2417 } else if (BO.hasNoSignedWrap()) {
2418 if (C->isNegative()) {
2419 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
2420 Lower = APInt::getSignedMinValue(Width);
2421 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
2422 } else {
2423 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
2424 Lower = APInt::getSignedMinValue(Width) + *C;
2425 Upper = APInt::getSignedMaxValue(Width) + 1;
2426 }
2427 }
2428 }
2429 break;
2430
2431 case Instruction::And:
2432 if (match(BO.getOperand(1), m_APInt(C)))
2433 // 'and x, C' produces [0, C].
2434 Upper = *C + 1;
2435 break;
2436
2437 case Instruction::Or:
2438 if (match(BO.getOperand(1), m_APInt(C)))
2439 // 'or x, C' produces [C, UINT_MAX].
2440 Lower = *C;
2441 break;
2442
2443 case Instruction::AShr:
2444 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2445 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
2446 Lower = APInt::getSignedMinValue(Width).ashr(*C);
2447 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
2448 } else if (match(BO.getOperand(0), m_APInt(C))) {
2449 unsigned ShiftAmount = Width - 1;
2450 if (!C->isNullValue() && BO.isExact())
2451 ShiftAmount = C->countTrailingZeros();
2452 if (C->isNegative()) {
2453 // 'ashr C, x' produces [C, C >> (Width-1)]
2454 Lower = *C;
2455 Upper = C->ashr(ShiftAmount) + 1;
2456 } else {
2457 // 'ashr C, x' produces [C >> (Width-1), C]
2458 Lower = C->ashr(ShiftAmount);
2459 Upper = *C + 1;
2460 }
2461 }
2462 break;
2463
2464 case Instruction::LShr:
2465 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
2466 // 'lshr x, C' produces [0, UINT_MAX >> C].
2467 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
2468 } else if (match(BO.getOperand(0), m_APInt(C))) {
2469 // 'lshr C, x' produces [C >> (Width-1), C].
2470 unsigned ShiftAmount = Width - 1;
2471 if (!C->isNullValue() && BO.isExact())
2472 ShiftAmount = C->countTrailingZeros();
2473 Lower = C->lshr(ShiftAmount);
2474 Upper = *C + 1;
2475 }
2476 break;
2477
2478 case Instruction::Shl:
2479 if (match(BO.getOperand(0), m_APInt(C))) {
2480 if (BO.hasNoUnsignedWrap()) {
2481 // 'shl nuw C, x' produces [C, C << CLZ(C)]
2482 Lower = *C;
2483 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
2484 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
2485 if (C->isNegative()) {
2486 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
2487 unsigned ShiftAmount = C->countLeadingOnes() - 1;
2488 Lower = C->shl(ShiftAmount);
2489 Upper = *C + 1;
2490 } else {
2491 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
2492 unsigned ShiftAmount = C->countLeadingZeros() - 1;
2493 Lower = *C;
2494 Upper = C->shl(ShiftAmount) + 1;
2495 }
2496 }
2497 }
2498 break;
2499
2500 case Instruction::SDiv:
2501 if (match(BO.getOperand(1), m_APInt(C))) {
2502 APInt IntMin = APInt::getSignedMinValue(Width);
2503 APInt IntMax = APInt::getSignedMaxValue(Width);
2504 if (C->isAllOnesValue()) {
2505 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
2506 // (INT_MIN is excluded because INT_MIN / -1 overflows)
2507 Lower = IntMin + 1;
2508 Upper = IntMax + 1;
2509 } else if (C->countLeadingZeros() < Width - 1) {
2510 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
2511 // where C != -1 and C != 0 and C != 1
2512 Lower = IntMin.sdiv(*C);
2513 Upper = IntMax.sdiv(*C);
2514 if (Lower.sgt(Upper))
2515 std::swap(Lower, Upper);
2516 Upper = Upper + 1;
2517 assert(Upper != Lower && "Upper part of range has wrapped!");
2518 }
2519 } else if (match(BO.getOperand(0), m_APInt(C))) {
2520 if (C->isMinSignedValue()) {
2521 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
2522 Lower = *C;
2523 Upper = Lower.lshr(1) + 1;
2524 } else {
2525 // 'sdiv C, x' produces [-|C|, |C|].
2526 Upper = C->abs() + 1;
2527 Lower = (-Upper) + 1;
2528 }
2529 }
2530 break;
2531
2532 case Instruction::UDiv:
2533 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
2534 // 'udiv x, C' produces [0, UINT_MAX / C].
2535 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
2536 } else if (match(BO.getOperand(0), m_APInt(C))) {
2537 // 'udiv C, x' produces [0, C].
2538 Upper = *C + 1;
2539 }
2540 break;
2541
2542 case Instruction::SRem:
2543 if (match(BO.getOperand(1), m_APInt(C))) {
2544 // 'srem x, C' produces (-|C|, |C|).
2545 Upper = C->abs();
2546 Lower = (-Upper) + 1;
2547 }
2548 break;
2549
2550 case Instruction::URem:
2551 if (match(BO.getOperand(1), m_APInt(C)))
2552 // 'urem x, C' produces [0, C).
2553 Upper = *C;
2554 break;
2555
2556 default:
2557 break;
2558 }
2559}
2560
2561static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2562 Value *RHS) {
2563 Type *ITy = GetCompareTy(RHS); // The return type.
2564
2565 Value *X;
2566 // Sign-bit checks can be optimized to true/false after unsigned
2567 // floating-point casts:
2568 // icmp slt (bitcast (uitofp X)), 0 --> false
2569 // icmp sgt (bitcast (uitofp X)), -1 --> true
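// uitofp never produces a negative floating-point value, so the IEEE sign bit
// of its result is clear and the bitcast value is non-negative as an integer.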
2570 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2571 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2572 return ConstantInt::getFalse(ITy);
2573 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2574 return ConstantInt::getTrue(ITy);
2575 }
2576
2577 const APInt *C;
2578 if (!match(RHS, m_APInt(C)))
2579 return nullptr;
2580
2581 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2582 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2583 if (RHS_CR.isEmptySet())
2584 return ConstantInt::getFalse(ITy);
2585 if (RHS_CR.isFullSet())
2586 return ConstantInt::getTrue(ITy);
2587
2588 // Find the range of possible values for binary operators.
2589 unsigned Width = C->getBitWidth();
2590 APInt Lower = APInt(Width, 0);
2591 APInt Upper = APInt(Width, 0);
2592 if (auto *BO = dyn_cast<BinaryOperator>(LHS))
2593 setLimitsForBinOp(*BO, Lower, Upper);
2594
2595 ConstantRange LHS_CR =
2596 Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
2597
2598 if (auto *I = dyn_cast<Instruction>(LHS))
2599 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
2600 LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
2601
2602 if (!LHS_CR.isFullSet()) {
2603 if (RHS_CR.contains(LHS_CR))
2604 return ConstantInt::getTrue(ITy);
2605 if (RHS_CR.inverse().contains(LHS_CR))
2606 return ConstantInt::getFalse(ITy);
2607 }
2608
2609 return nullptr;
2610}
2611
2612/// TODO: A large part of this logic is duplicated in InstCombine's
2613/// foldICmpBinOp(). We should be able to share that and avoid the code
2614/// duplication.
2615static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2616 Value *RHS, const SimplifyQuery &Q,
2617 unsigned MaxRecurse) {
2618 Type *ITy = GetCompareTy(LHS); // The return type.
2619
2620 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2621 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2622 if (MaxRecurse && (LBO || RBO)) {
2623 // Analyze the case when either LHS or RHS is an add instruction.
2624 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2625 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2626 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2627 if (LBO && LBO->getOpcode() == Instruction::Add) {
2628 A = LBO->getOperand(0);
2629 B = LBO->getOperand(1);
2630 NoLHSWrapProblem =
2631 ICmpInst::isEquality(Pred) ||
2632 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
2633 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
2634 }
2635 if (RBO && RBO->getOpcode() == Instruction::Add) {
2636 C = RBO->getOperand(0);
2637 D = RBO->getOperand(1);
2638 NoRHSWrapProblem =
2639 ICmpInst::isEquality(Pred) ||
2640 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
2641 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
2642 }
2643
2644 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
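// e.g. "icmp ult (add nuw %x, %y), %x" reduces to "icmp ult %y, 0", which is
// always false.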
2645 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2646 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2647 Constant::getNullValue(RHS->getType()), Q,
2648 MaxRecurse - 1))
2649 return V;
2650
2651 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2652 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2653 if (Value *V =
2654 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2655 C == LHS ? D : C, Q, MaxRecurse - 1))
2656 return V;
2657
2658 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2659 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
2660 NoRHSWrapProblem) {
2661 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2662 Value *Y, *Z;
2663 if (A == C) {
2664 // C + B == C + D -> B == D
2665 Y = B;
2666 Z = D;
2667 } else if (A == D) {
2668 // D + B == C + D -> B == C
2669 Y = B;
2670 Z = C;
2671 } else if (B == C) {
2672 // A + C == C + D -> A == D
2673 Y = A;
2674 Z = D;
2675 } else {
2676 assert(B == D);
2677 // A + D == C + D -> A == C
2678 Y = A;
2679 Z = C;
2680 }
2681 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
2682 return V;
2683 }
2684 }
2685
2686 {
2687 Value *Y = nullptr;
2688 // icmp pred (or X, Y), X
2689 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2690 if (Pred == ICmpInst::ICMP_ULT)
2691 return getFalse(ITy);
2692 if (Pred == ICmpInst::ICMP_UGE)
2693 return getTrue(ITy);
2694
2695 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2696 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2697 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2698 if (RHSKnown.isNonNegative() && YKnown.isNegative())
2699 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2700 if (RHSKnown.isNegative() || YKnown.isNonNegative())
2701 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2702 }
2703 }
2704 // icmp pred X, (or X, Y)
2705 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) {
2706 if (Pred == ICmpInst::ICMP_ULE)
2707 return getTrue(ITy);
2708 if (Pred == ICmpInst::ICMP_UGT)
2709 return getFalse(ITy);
2710
2711 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) {
2712 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2713 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2714 if (LHSKnown.isNonNegative() && YKnown.isNegative())
2715 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy);
2716 if (LHSKnown.isNegative() || YKnown.isNonNegative())
2717 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy);
2718 }
2719 }
2720 }
2721
2722 // icmp pred (and X, Y), X
2723 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2724 if (Pred == ICmpInst::ICMP_UGT)
2725 return getFalse(ITy);
2726 if (Pred == ICmpInst::ICMP_ULE)
2727 return getTrue(ITy);
2728 }
2729 // icmp pred X, (and X, Y)
2730 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) {
2731 if (Pred == ICmpInst::ICMP_UGE)
2732 return getTrue(ITy);
2733 if (Pred == ICmpInst::ICMP_ULT)
2734 return getFalse(ITy);
2735 }
2736
2737 // 0 - (zext X) pred C
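// The negation of a zero-extended value is never strictly positive as a
// signed number (the zext result has its top bit clear), which settles these
// signed comparisons against a known-sign constant.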
2738 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
2739 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2740 if (RHSC->getValue().isStrictlyPositive()) {
2741 if (Pred == ICmpInst::ICMP_SLT)
2742 return ConstantInt::getTrue(RHSC->getContext());
2743 if (Pred == ICmpInst::ICMP_SGE)
2744 return ConstantInt::getFalse(RHSC->getContext());
2745 if (Pred == ICmpInst::ICMP_EQ)
2746 return ConstantInt::getFalse(RHSC->getContext());
2747 if (Pred == ICmpInst::ICMP_NE)
2748 return ConstantInt::getTrue(RHSC->getContext());
2749 }
2750 if (RHSC->getValue().isNonNegative()) {
2751 if (Pred == ICmpInst::ICMP_SLE)
2752 return ConstantInt::getTrue(RHSC->getContext());
2753 if (Pred == ICmpInst::ICMP_SGT)
2754 return ConstantInt::getFalse(RHSC->getContext());
2755 }
2756 }
2757 }
2758
2759 // icmp pred (urem X, Y), Y
2760 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2761 switch (Pred) {
2762 default:
2763 break;
2764 case ICmpInst::ICMP_SGT:
2765 case ICmpInst::ICMP_SGE: {
2766 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2767 if (!Known.isNonNegative())
2768 break;
2769 LLVM_FALLTHROUGH;
2770 }
2771 case ICmpInst::ICMP_EQ:
2772 case ICmpInst::ICMP_UGT:
2773 case ICmpInst::ICMP_UGE:
2774 return getFalse(ITy);
2775 case ICmpInst::ICMP_SLT:
2776 case ICmpInst::ICMP_SLE: {
2777 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2778 if (!Known.isNonNegative())
2779 break;
2780 LLVM_FALLTHROUGH;
2781 }
2782 case ICmpInst::ICMP_NE:
2783 case ICmpInst::ICMP_ULT:
2784 case ICmpInst::ICMP_ULE:
2785 return getTrue(ITy);
2786 }
2787 }
2788
2789 // icmp pred X, (urem Y, X)
2790 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
2791 switch (Pred) {
2792 default:
2793 break;
2794 case ICmpInst::ICMP_SGT:
2795 case ICmpInst::ICMP_SGE: {
2796 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2797 if (!Known.isNonNegative())
2798 break;
2799 LLVM_FALLTHROUGH;
2800 }
2801 case ICmpInst::ICMP_NE:
2802 case ICmpInst::ICMP_UGT:
2803 case ICmpInst::ICMP_UGE:
2804 return getTrue(ITy);
2805 case ICmpInst::ICMP_SLT:
2806 case ICmpInst::ICMP_SLE: {
2807 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2808 if (!Known.isNonNegative())
2809 break;
2810 LLVM_FALLTHROUGH;
2811 }
2812 case ICmpInst::ICMP_EQ:
2813 case ICmpInst::ICMP_ULT:
2814 case ICmpInst::ICMP_ULE:
2815 return getFalse(ITy);
2816 }
2817 }
2818
2819 // x >> y <=u x
2820 // x udiv y <=u x.
2821 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2822 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) {
2823 // icmp pred (X op Y), X
2824 if (Pred == ICmpInst::ICMP_UGT)
2825 return getFalse(ITy);
2826 if (Pred == ICmpInst::ICMP_ULE)
2827 return getTrue(ITy);
2828 }
2829
2830 // x >=u x >> y
2831 // x >=u x udiv y.
2832 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) ||
2833 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) {
2834 // icmp pred X, (X op Y)
2835 if (Pred == ICmpInst::ICMP_ULT)
2836 return getFalse(ITy);
2837 if (Pred == ICmpInst::ICMP_UGE)
2838 return getTrue(ITy);
2839 }
2840
2841 // handle:
2842 // CI2 << X == CI
2843 // CI2 << X != CI
2844 //
2845 // where CI2 is a power of 2 and CI isn't
2846 if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
2847 const APInt *CI2Val, *CIVal = &CI->getValue();
2848 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
2849 CI2Val->isPowerOf2()) {
2850 if (!CIVal->isPowerOf2()) {
2851 // CI2 << X can equal zero in some circumstances,
2852 // so this simplification is unsafe if CI is zero.
2853 //
2854 // We know it is safe if:
2855 // - The shift is nsw, so we can't shift out the one bit.
2856 // - The shift is nuw, so we can't shift out the one bit.
2857 // - CI2 is one.
2858 // - CI isn't zero.
2859 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
2860 CI2Val->isOneValue() || !CI->isZero()) {
2861 if (Pred == ICmpInst::ICMP_EQ)
2862 return ConstantInt::getFalse(RHS->getContext());
2863 if (Pred == ICmpInst::ICMP_NE)
2864 return ConstantInt::getTrue(RHS->getContext());
2865 }
2866 }
2867 if (CIVal->isSignMask() && CI2Val->isOneValue()) {
2868 if (Pred == ICmpInst::ICMP_UGT)
2869 return ConstantInt::getFalse(RHS->getContext());
2870 if (Pred == ICmpInst::ICMP_ULE)
2871 return ConstantInt::getTrue(RHS->getContext());
2872 }
2873 }
2874 }
2875
2876 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
2877 LBO->getOperand(1) == RBO->getOperand(1)) {
2878 switch (LBO->getOpcode()) {
2879 default:
2880 break;
2881 case Instruction::UDiv:
2882 case Instruction::LShr:
2883 if (ICmpInst::isSigned(Pred) || !LBO->isExact() || !RBO->isExact())
2884 break;
2885 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2886 RBO->getOperand(0), Q, MaxRecurse - 1))
2887 return V;
2888 break;
2889 case Instruction::SDiv:
2890 if (!ICmpInst::isEquality(Pred) || !LBO->isExact() || !RBO->isExact())
2891 break;
2892 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2893 RBO->getOperand(0), Q, MaxRecurse - 1))
2894 return V;
2895 break;
2896 case Instruction::AShr:
2897 if (!LBO->isExact() || !RBO->isExact())
2898 break;
2899 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2900 RBO->getOperand(0), Q, MaxRecurse - 1))
2901 return V;
2902 break;
2903 case Instruction::Shl: {
2904 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
2905 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
2906 if (!NUW && !NSW)
2907 break;
2908 if (!NSW && ICmpInst::isSigned(Pred))
2909 break;
2910 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
2911 RBO->getOperand(0), Q, MaxRecurse - 1))
2912 return V;
2913 break;
2914 }
2915 }
2916 }
2917 return nullptr;
2918}
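For illustration only, a minimal standalone sketch (not part of this file; the helper name, throwaway module, and default data layout are assumptions made for the example) of how the "icmp pred (and X, Y), X" rules above can be exercised through the public llvm::SimplifyICmpInst entry point:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: (and i32 %x, %y) u> %x can never be true, so the call
// below should return the i1 false constant. The block is scaffolding only
// and is intentionally left unterminated.
static Value *simplifyAndUgtExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(I32, {I32, I32}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  Value *X = F->arg_begin();
  Value *Y = F->arg_begin() + 1;
  Value *And = B.CreateAnd(X, Y);

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyICmpInst(ICmpInst::ICMP_UGT, And, X, Q); // expect i1 false
}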
2919
2920/// Simplify integer comparisons where at least one operand of the compare
2921/// matches an integer min/max idiom.
2922static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
2923 Value *RHS, const SimplifyQuery &Q,
2924 unsigned MaxRecurse) {
2925 Type *ITy = GetCompareTy(LHS); // The return type.
2926 Value *A, *B;
2927 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
2928 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
2929
2930 // Signed variants on "max(a,b)>=a -> true".
2931 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
2932 if (A != RHS)
2933 std::swap(A, B); // smax(A, B) pred A.
2934 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2935 // We analyze this as smax(A, B) pred A.
2936 P = Pred;
2937 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
2938 (A == LHS || B == LHS)) {
2939 if (A != LHS)
2940 std::swap(A, B); // A pred smax(A, B).
2941 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
2942 // We analyze this as smax(A, B) swapped-pred A.
2943 P = CmpInst::getSwappedPredicate(Pred);
2944 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
2945 (A == RHS || B == RHS)) {
2946 if (A != RHS)
2947 std::swap(A, B); // smin(A, B) pred A.
2948 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2949 // We analyze this as smax(-A, -B) swapped-pred -A.
2950 // Note that we do not need to actually form -A or -B thanks to EqP.
2951 P = CmpInst::getSwappedPredicate(Pred);
2952 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
2953 (A == LHS || B == LHS)) {
2954 if (A != LHS)
2955 std::swap(A, B); // A pred smin(A, B).
2956 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
2957 // We analyze this as smax(-A, -B) pred -A.
2958 // Note that we do not need to actually form -A or -B thanks to EqP.
2959 P = Pred;
2960 }
2961 if (P != CmpInst::BAD_ICMP_PREDICATE) {
2962 // Cases correspond to "max(A, B) p A".
2963 switch (P) {
2964 default:
2965 break;
2966 case CmpInst::ICMP_EQ:
2967 case CmpInst::ICMP_SLE:
2968 // Equivalent to "A EqP B". This may be the same as the condition tested
2969 // in the max/min; if so, we can just return that.
2970 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
2971 return V;
2972 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
2973 return V;
2974 // Otherwise, see if "A EqP B" simplifies.
2975 if (MaxRecurse)
2976 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
2977 return V;
2978 break;
2979 case CmpInst::ICMP_NE:
2980 case CmpInst::ICMP_SGT: {
2981 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
2982 // Equivalent to "A InvEqP B". This may be the same as the condition
2983 // tested in the max/min; if so, we can just return that.
2984 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
2985 return V;
2986 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
2987 return V;
2988 // Otherwise, see if "A InvEqP B" simplifies.
2989 if (MaxRecurse)
2990 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
2991 return V;
2992 break;
2993 }
2994 case CmpInst::ICMP_SGE:
2995 // Always true.
2996 return getTrue(ITy);
2997 case CmpInst::ICMP_SLT:
2998 // Always false.
2999 return getFalse(ITy);
3000 }
3001 }
3002
3003 // Unsigned variants on "max(a,b)>=a -> true".
3004 P = CmpInst::BAD_ICMP_PREDICATE;
3005 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3006 if (A != RHS)
3007 std::swap(A, B); // umax(A, B) pred A.
3008 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3009 // We analyze this as umax(A, B) pred A.
3010 P = Pred;
3011 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3012 (A == LHS || B == LHS)) {
3013 if (A != LHS)
3014 std::swap(A, B); // A pred umax(A, B).
3015 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3016 // We analyze this as umax(A, B) swapped-pred A.
3017 P = CmpInst::getSwappedPredicate(Pred);
3018 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3019 (A == RHS || B == RHS)) {
3020 if (A != RHS)
3021 std::swap(A, B); // umin(A, B) pred A.
3022 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3023 // We analyze this as umax(-A, -B) swapped-pred -A.
3024 // Note that we do not need to actually form -A or -B thanks to EqP.
3025 P = CmpInst::getSwappedPredicate(Pred);
3026 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3027 (A == LHS || B == LHS)) {
3028 if (A != LHS)
3029 std::swap(A, B); // A pred umin(A, B).
3030 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3031 // We analyze this as umax(-A, -B) pred -A.
3032 // Note that we do not need to actually form -A or -B thanks to EqP.
3033 P = Pred;
3034 }
3035 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3036 // Cases correspond to "max(A, B) p A".
3037 switch (P) {
3038 default:
3039 break;
3040 case CmpInst::ICMP_EQ:
3041 case CmpInst::ICMP_ULE:
3042 // Equivalent to "A EqP B". This may be the same as the condition tested
3043 // in the max/min; if so, we can just return that.
3044 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3045 return V;
3046 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3047 return V;
3048 // Otherwise, see if "A EqP B" simplifies.
3049 if (MaxRecurse)
3050 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3051 return V;
3052 break;
3053 case CmpInst::ICMP_NE:
3054 case CmpInst::ICMP_UGT: {
3055 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3056 // Equivalent to "A InvEqP B". This may be the same as the condition
3057 // tested in the max/min; if so, we can just return that.
3058 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3059 return V;
3060 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3061 return V;
3062 // Otherwise, see if "A InvEqP B" simplifies.
3063 if (MaxRecurse)
3064 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3065 return V;
3066 break;
3067 }
3068 case CmpInst::ICMP_UGE:
3069 // Always true.
3070 return getTrue(ITy);
3071 case CmpInst::ICMP_ULT:
3072 // Always false.
3073 return getFalse(ITy);
3074 }
3075 }
3076
3077 // Variants on "max(x,y) >= min(x,z)".
3078 Value *C, *D;
3079 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3080 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3081 (A == C || A == D || B == C || B == D)) {
3082 // max(x, ?) pred min(x, ?).
3083 if (Pred == CmpInst::ICMP_SGE)
3084 // Always true.
3085 return getTrue(ITy);
3086 if (Pred == CmpInst::ICMP_SLT)
3087 // Always false.
3088 return getFalse(ITy);
3089 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3090 match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
3091 (A == C || A == D || B == C || B == D)) {
3092 // min(x, ?) pred max(x, ?).
3093 if (Pred == CmpInst::ICMP_SLE)
3094 // Always true.
3095 return getTrue(ITy);
3096 if (Pred == CmpInst::ICMP_SGT)
3097 // Always false.
3098 return getFalse(ITy);
3099 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3100 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3101 (A == C || A == D || B == C || B == D)) {
3102 // max(x, ?) pred min(x, ?).
3103 if (Pred == CmpInst::ICMP_UGE)
3104 // Always true.
3105 return getTrue(ITy);
3106 if (Pred == CmpInst::ICMP_ULT)
3107 // Always false.
3108 return getFalse(ITy);
3109 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3110 match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
3111 (A == C || A == D || B == C || B == D)) {
3112 // min(x, ?) pred max(x, ?).
3113 if (Pred == CmpInst::ICMP_ULE)
3114 // Always true.
3115 return getTrue(ITy);
3116 if (Pred == CmpInst::ICMP_UGT)
3117 // Always false.
3118 return getFalse(ITy);
3119 }
3120
3121 return nullptr;
3122}
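Along the same lines, a hedged sketch (helper name and scaffolding are assumptions, not code from this file) of the signed max folds above: smax is spelled in IR as a compare plus select, and comparing it against one of its own operands should fold to a constant.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: smax(a, b) is written as
//   %c = icmp sgt i32 %a, %b ;  %m = select i1 %c, i32 %a, i32 %b
// and "smax(a, b) >=s a" should fold to true via the rules above.
static Value *simplifySMaxSgeExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(I32, {I32, I32}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "g", &M);
  IRBuilder<> Bld(BasicBlock::Create(Ctx, "entry", F));

  Value *A = F->arg_begin();
  Value *B = F->arg_begin() + 1;
  Value *Max = Bld.CreateSelect(Bld.CreateICmpSGT(A, B), A, B);

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyICmpInst(ICmpInst::ICMP_SGE, Max, A, Q); // expect i1 true
}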
3123
3124/// Given operands for an ICmpInst, see if we can fold the result.
3125/// If not, this returns null.
3126static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3127 const SimplifyQuery &Q, unsigned MaxRecurse) {
3128 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3129 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3130
3131 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3132 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3133 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3134
3135 // If we have a constant, make sure it is on the RHS.
3136 std::swap(LHS, RHS);
3137 Pred = CmpInst::getSwappedPredicate(Pred);
3138 }
3139
3140 Type *ITy = GetCompareTy(LHS); // The return type.
3141
3142 // icmp X, X -> true/false
3143 // icmp X, undef -> true/false because undef could be X.
3144 if (LHS == RHS || isa<UndefValue>(RHS))
3145 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3146
3147 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3148 return V;
3149
3150 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3151 return V;
3152
3153 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
3154 return V;
3155
3156 // If both operands have range metadata, use the metadata
3157 // to simplify the comparison.
3158 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3159 auto RHS_Instr = cast<Instruction>(RHS);
3160 auto LHS_Instr = cast<Instruction>(LHS);
3161
3162 if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
3163 LHS_Instr->getMetadata(LLVMContext::MD_range)) {
3164 auto RHS_CR = getConstantRangeFromMetadata(
3165 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3166 auto LHS_CR = getConstantRangeFromMetadata(
3167 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3168
3169 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
3170 if (Satisfied_CR.contains(LHS_CR))
3171 return ConstantInt::getTrue(RHS->getContext());
3172
3173 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
3174 CmpInst::getInversePredicate(Pred), RHS_CR);
3175 if (InversedSatisfied_CR.contains(LHS_CR))
3176 return ConstantInt::getFalse(RHS->getContext());
3177 }
3178 }
3179
3180 // Compare of cast, for example (zext X) != 0 -> X != 0
3181 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3182 Instruction *LI = cast<CastInst>(LHS);
3183 Value *SrcOp = LI->getOperand(0);
3184 Type *SrcTy = SrcOp->getType();
3185 Type *DstTy = LI->getType();
3186
3187 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3188 // if the integer type is the same size as the pointer type.
3189 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3190 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3191 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3192 // Transfer the cast to the constant.
3193 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3194 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3195 Q, MaxRecurse-1))
3196 return V;
3197 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3198 if (RI->getOperand(0)->getType() == SrcTy)
3199 // Compare without the cast.
3200 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3201 Q, MaxRecurse-1))
3202 return V;
3203 }
3204 }
3205
3206 if (isa<ZExtInst>(LHS)) {
3207 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3208 // same type.
3209 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3210 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3211 // Compare X and Y. Note that signed predicates become unsigned.
3212 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3213 SrcOp, RI->getOperand(0), Q,
3214 MaxRecurse-1))
3215 return V;
3216 }
3217 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3218 // too. If not, then try to deduce the result of the comparison.
3219 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3220 // Compute the constant that would result if we truncated to SrcTy and then
3221 // re-extended to DstTy.
3222 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3223 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3224
3225 // If the re-extended constant didn't change then this is effectively
3226 // also a case of comparing two zero-extended values.
3227 if (RExt == CI && MaxRecurse)
3228 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3229 SrcOp, Trunc, Q, MaxRecurse-1))
3230 return V;
3231
3232 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3233 // there. Use this to work out the result of the comparison.
3234 if (RExt != CI) {
3235 switch (Pred) {
3236 default: llvm_unreachable("Unknown ICmp predicate!");
3237 // LHS <u RHS.
3238 case ICmpInst::ICMP_EQ:
3239 case ICmpInst::ICMP_UGT:
3240 case ICmpInst::ICMP_UGE:
3241 return ConstantInt::getFalse(CI->getContext());
3242
3243 case ICmpInst::ICMP_NE:
3244 case ICmpInst::ICMP_ULT:
3245 case ICmpInst::ICMP_ULE:
3246 return ConstantInt::getTrue(CI->getContext());
3247
3248 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3249 // is non-negative then LHS <s RHS.
3250 case ICmpInst::ICMP_SGT:
3251 case ICmpInst::ICMP_SGE:
3252 return CI->getValue().isNegative() ?
3253 ConstantInt::getTrue(CI->getContext()) :
3254 ConstantInt::getFalse(CI->getContext());
3255
3256 case ICmpInst::ICMP_SLT:
3257 case ICmpInst::ICMP_SLE:
3258 return CI->getValue().isNegative() ?
3259 ConstantInt::getFalse(CI->getContext()) :
3260 ConstantInt::getTrue(CI->getContext());
3261 }
3262 }
3263 }
3264 }
3265
3266 if (isa<SExtInst>(LHS)) {
3267 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3268 // same type.
3269 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3270 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3271 // Compare X and Y. Note that the predicate does not change.
3272 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3273 Q, MaxRecurse-1))
3274 return V;
3275 }
3276 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3277 // too. If not, then try to deduce the result of the comparison.
3278 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3279 // Compute the constant that would result if we truncated to SrcTy and then
3280 // re-extended to DstTy.
3281 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3282 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3283
3284 // If the re-extended constant didn't change then this is effectively
3285 // also a case of comparing two sign-extended values.
3286 if (RExt == CI && MaxRecurse)
3287 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3288 return V;
3289
3290 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3291 // bits there. Use this to work out the result of the comparison.
3292 if (RExt != CI) {
3293 switch (Pred) {
3294 default: llvm_unreachable("Unknown ICmp predicate!");
3295 case ICmpInst::ICMP_EQ:
3296 return ConstantInt::getFalse(CI->getContext());
3297 case ICmpInst::ICMP_NE:
3298 return ConstantInt::getTrue(CI->getContext());
3299
3300 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3301 // LHS >s RHS.
3302 case ICmpInst::ICMP_SGT:
3303 case ICmpInst::ICMP_SGE:
3304 return CI->getValue().isNegative() ?
3305 ConstantInt::getTrue(CI->getContext()) :
3306 ConstantInt::getFalse(CI->getContext());
3307 case ICmpInst::ICMP_SLT:
3308 case ICmpInst::ICMP_SLE:
3309 return CI->getValue().isNegative() ?
3310 ConstantInt::getFalse(CI->getContext()) :
3311 ConstantInt::getTrue(CI->getContext());
3312
3313 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3314 // LHS >u RHS.
3315 case ICmpInst::ICMP_UGT:
3316 case ICmpInst::ICMP_UGE:
3317 // Comparison is true iff the LHS <s 0.
3318 if (MaxRecurse)
3319 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3320 Constant::getNullValue(SrcTy),
3321 Q, MaxRecurse-1))
3322 return V;
3323 break;
3324 case ICmpInst::ICMP_ULT:
3325 case ICmpInst::ICMP_ULE:
3326 // Comparison is true iff the LHS >=s 0.
3327 if (MaxRecurse)
3328 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3329 Constant::getNullValue(SrcTy),
3330 Q, MaxRecurse-1))
3331 return V;
3332 break;
3333 }
3334 }
3335 }
3336 }
3337 }
3338
3339 // icmp eq|ne X, Y -> false|true if X != Y
3340 if (ICmpInst::isEquality(Pred) &&
3341 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
3342 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3343 }
3344
3345 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3346 return V;
3347
3348 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3349 return V;
3350
3351 // Simplify comparisons of related pointers using a powerful, recursive
3352 // GEP-walk when we have target data available.
3353 if (LHS->getType()->isPointerTy())
3354 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
3355 RHS))
3356 return C;
3357 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3358 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3359 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3360 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3361 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3362 Q.DL.getTypeSizeInBits(CRHS->getType()))
3363 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
3364 CLHS->getPointerOperand(),
3365 CRHS->getPointerOperand()))
3366 return C;
3367
3368 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3369 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3370 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3371 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3372 (ICmpInst::isEquality(Pred) ||
3373 (GLHS->isInBounds() && GRHS->isInBounds() &&
3374 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3375 // The bases are equal and the indices are constant. Build a constant
3376 // expression GEP with the same indices and a null base pointer to see
3377 // what constant folding can make out of it.
3378 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3379 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
3380 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3381 GLHS->getSourceElementType(), Null, IndicesLHS);
3382
3383 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3384 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3385 GLHS->getSourceElementType(), Null, IndicesRHS);
3386 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3387 }
3388 }
3389 }
3390
3391 // If the comparison is with the result of a select instruction, check whether
3392 // comparing with either branch of the select always yields the same value.
3393 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3394 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3395 return V;
3396
3397 // If the comparison is with the result of a phi instruction, check whether
3398 // doing the compare with each incoming phi value yields a common result.
3399 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3400 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3401 return V;
3402
3403 return nullptr;
3404}
3405
3406Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3407 const SimplifyQuery &Q) {
3408 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3409}
3410
3411/// Given operands for an FCmpInst, see if we can fold the result.
3412/// If not, this returns null.
3413static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3414 FastMathFlags FMF, const SimplifyQuery &Q,
3415 unsigned MaxRecurse) {
3416 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3417 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3418
3419 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4: Taking false branch
3420 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3421 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3422
3423 // If we have a constant, make sure it is on the RHS.
3424 std::swap(LHS, RHS);
3425 Pred = CmpInst::getSwappedPredicate(Pred);
3426 }
3427
3428 // Fold trivial predicates.
3429 Type *RetTy = GetCompareTy(LHS);
3430 if (Pred == FCmpInst::FCMP_FALSE)
5: Assuming 'Pred' is not equal to FCMP_FALSE
6: Taking false branch
3431 return getFalse(RetTy);
3432 if (Pred == FCmpInst::FCMP_TRUE)
7: Assuming 'Pred' is not equal to FCMP_TRUE
8: Taking false branch
3433 return getTrue(RetTy);
3434
3435 // UNO/ORD predicates can be trivially folded if NaNs are ignored.
3436 if (FMF.noNaNs()) {
9: Taking false branch
3437 if (Pred == FCmpInst::FCMP_UNO)
3438 return getFalse(RetTy);
3439 if (Pred == FCmpInst::FCMP_ORD)
3440 return getTrue(RetTy);
3441 }
3442
3443 // NaN is unordered; NaN is not ordered.
3444 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3445 "Comparison must be either ordered or unordered");
3446 if (match(RHS, m_NaN()))
10: Assuming the condition is false
11: Taking false branch
3447 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3448
3449 // fcmp pred x, undef and fcmp pred undef, x
3450 // fold to true if unordered, false if ordered
3451 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
12: Taking false branch
3452 // Choosing NaN for the undef will always make unordered comparison succeed
3453 // and ordered comparison fail.
3454 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3455 }
3456
3457 // fcmp x,x -> true/false. Not all compares are foldable.
3458 if (LHS == RHS) {
13: Assuming 'LHS' is not equal to 'RHS'
14: Taking false branch
3459 if (CmpInst::isTrueWhenEqual(Pred))
3460 return getTrue(RetTy);
3461 if (CmpInst::isFalseWhenEqual(Pred))
3462 return getFalse(RetTy);
3463 }
3464
3465 // Handle fcmp with constant RHS.
3466 const APFloat *C;
3467 if (match(RHS, m_APFloat(C))) {
15: Taking false branch
3468 // Check whether the constant is an infinity.
3469 if (C->isInfinity()) {
3470 if (C->isNegative()) {
3471 switch (Pred) {
3472 case FCmpInst::FCMP_OLT:
3473 // No value is ordered and less than negative infinity.
3474 return getFalse(RetTy);
3475 case FCmpInst::FCMP_UGE:
3476 // All values are unordered with or at least negative infinity.
3477 return getTrue(RetTy);
3478 default:
3479 break;
3480 }
3481 } else {
3482 switch (Pred) {
3483 case FCmpInst::FCMP_OGT:
3484 // No value is ordered and greater than infinity.
3485 return getFalse(RetTy);
3486 case FCmpInst::FCMP_ULE:
3487 // All values are unordered with and at most infinity.
3488 return getTrue(RetTy);
3489 default:
3490 break;
3491 }
3492 }
3493 }
3494 if (C->isZero()) {
3495 switch (Pred) {
3496 case FCmpInst::FCMP_UGE:
3497 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3498 return getTrue(RetTy);
3499 break;
3500 case FCmpInst::FCMP_OLT:
3501 // X < 0
3502 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3503 return getFalse(RetTy);
3504 break;
3505 default:
3506 break;
3507 }
3508 } else if (C->isNegative()) {
3509 assert(!C->isNaN() && "Unexpected NaN constant!");
3510 // TODO: We can catch more cases by using a range check rather than
3511 // relying on CannotBeOrderedLessThanZero.
3512 switch (Pred) {
3513 case FCmpInst::FCMP_UGE:
3514 case FCmpInst::FCMP_UGT:
3515 case FCmpInst::FCMP_UNE:
3516 // (X >= 0) implies (X > C) when (C < 0)
3517 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3518 return getTrue(RetTy);
3519 break;
3520 case FCmpInst::FCMP_OEQ:
3521 case FCmpInst::FCMP_OLE:
3522 case FCmpInst::FCMP_OLT:
3523 // (X >= 0) implies !(X < C) when (C < 0)
3524 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3525 return getFalse(RetTy);
3526 break;
3527 default:
3528 break;
3529 }
3530 }
3531 }
3532
3533 // If the comparison is with the result of a select instruction, check whether
3534 // comparing with either branch of the select always yields the same value.
3535 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
16: Taking true branch
3536 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
17: Calling 'ThreadCmpOverSelect'
3537 return V;
3538
3539 // If the comparison is with the result of a phi instruction, check whether
3540 // doing the compare with each incoming phi value yields a common result.
3541 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3542 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3543 return V;
3544
3545 return nullptr;
3546}
3547
3548Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3549 FastMathFlags FMF, const SimplifyQuery &Q) {
3550 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3551}
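As a rough usage sketch (the helper and module setup are assumptions, not code from this file), an ordered compare against a NaN constant should fold to false via the m_NaN() rule above, even without fast-math flags:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Hypothetical helper: an ordered compare against NaN is never true, so the
// call below should return the i1 false constant.
static Value *simplifyFCmpNaNExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *FltTy = Type::getFloatTy(Ctx);
  auto *FTy = FunctionType::get(FltTy, {FltTy}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "h", &M);

  Value *X = F->arg_begin();                 // opaque float value
  Constant *NaN = ConstantFP::getNaN(FltTy); // NaN of the same type

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyFCmpInst(FCmpInst::FCMP_OLT, X, NaN, FastMathFlags(), Q);
}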
3552
3553/// See if V simplifies when its operand Op is replaced with RepOp.
3554static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3555 const SimplifyQuery &Q,
3556 unsigned MaxRecurse) {
3557 // Trivial replacement.
3558 if (V == Op)
3559 return RepOp;
3560
3561 // We cannot replace a constant, and shouldn't even try.
3562 if (isa<Constant>(Op))
3563 return nullptr;
3564
3565 auto *I = dyn_cast<Instruction>(V);
3566 if (!I)
3567 return nullptr;
3568
3569 // If this is a binary operator, try to simplify it with the replaced op.
3570 if (auto *B = dyn_cast<BinaryOperator>(I)) {
3571 // Consider:
3572 // %cmp = icmp eq i32 %x, 2147483647
3573 // %add = add nsw i32 %x, 1
3574 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3575 //
3576 // We can't replace %sel with %add unless we strip away the flags.
3577 if (isa<OverflowingBinaryOperator>(B))
3578 if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
3579 return nullptr;
3580 if (isa<PossiblyExactOperator>(B))
3581 if (B->isExact())
3582 return nullptr;
3583
3584 if (MaxRecurse) {
3585 if (B->getOperand(0) == Op)
3586 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
3587 MaxRecurse - 1);
3588 if (B->getOperand(1) == Op)
3589 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
3590 MaxRecurse - 1);
3591 }
3592 }
3593
3594 // Same for CmpInsts.
3595 if (CmpInst *C = dyn_cast<CmpInst>(I)) {
3596 if (MaxRecurse) {
3597 if (C->getOperand(0) == Op)
3598 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
3599 MaxRecurse - 1);
3600 if (C->getOperand(1) == Op)
3601 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
3602 MaxRecurse - 1);
3603 }
3604 }
3605
3606 // Same for GEPs.
3607 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3608 if (MaxRecurse) {
3609 SmallVector<Value *, 8> NewOps(GEP->getNumOperands());
3610 transform(GEP->operands(), NewOps.begin(),
3611 [&](Value *V) { return V == Op ? RepOp : V; });
3612 return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q,
3613 MaxRecurse - 1);
3614 }
3615 }
3616
3617 // TODO: We could hand off more cases to instsimplify here.
3618
3619 // If all operands are constant after substituting Op for RepOp then we can
3620 // constant fold the instruction.
3621 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3622 // Build a list of all constant operands.
3623 SmallVector<Constant *, 8> ConstOps;
3624 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3625 if (I->getOperand(i) == Op)
3626 ConstOps.push_back(CRepOp);
3627 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
3628 ConstOps.push_back(COp);
3629 else
3630 break;
3631 }
3632
3633 // All operands were constants, fold it.
3634 if (ConstOps.size() == I->getNumOperands()) {
3635 if (CmpInst *C = dyn_cast<CmpInst>(I))
3636 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3637 ConstOps[1], Q.DL, Q.TLI);
3638
3639 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3640 if (!LI->isVolatile())
3641 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3642
3643 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3644 }
3645 }
3646
3647 return nullptr;
3648}
3649
3650/// Try to simplify a select instruction when its condition operand is an
3651/// integer comparison where one operand of the compare is a constant.
3652static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3653 const APInt *Y, bool TrueWhenUnset) {
3654 const APInt *C;
3655
3656 // (X & Y) == 0 ? X & ~Y : X --> X
3657 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
3658 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3659 *Y == ~*C)
3660 return TrueWhenUnset ? FalseVal : TrueVal;
3661
3662 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3663 // (X & Y) != 0 ? X : X & ~Y --> X
3664 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3665 *Y == ~*C)
3666 return TrueWhenUnset ? FalseVal : TrueVal;
3667
3668 if (Y->isPowerOf2()) {
3669 // (X & Y) == 0 ? X | Y : X --> X | Y
3670 // (X & Y) != 0 ? X | Y : X --> X
3671 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
3672 *Y == *C)
3673 return TrueWhenUnset ? TrueVal : FalseVal;
3674
3675 // (X & Y) == 0 ? X : X | Y --> X
3676 // (X & Y) != 0 ? X : X | Y --> X | Y
3677 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
3678 *Y == *C)
3679 return TrueWhenUnset ? TrueVal : FalseVal;
3680 }
3681
3682 return nullptr;
3683}
3684
3685/// An alternative way to test if a bit is set or not uses sgt/slt instead of
3686/// eq/ne.
3687static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
3688 ICmpInst::Predicate Pred,
3689 Value *TrueVal, Value *FalseVal) {
3690 Value *X;
3691 APInt Mask;
3692 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
3693 return nullptr;
3694
3695 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
3696 Pred == ICmpInst::ICMP_EQ);
3697}
3698
3699/// Try to simplify a select instruction when its condition operand is an
3700/// integer comparison.
3701static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
3702 Value *FalseVal, const SimplifyQuery &Q,
3703 unsigned MaxRecurse) {
3704 ICmpInst::Predicate Pred;
3705 Value *CmpLHS, *CmpRHS;
3706 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
3707 return nullptr;
3708
3709 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) {
3710 Value *X;
3711 const APInt *Y;
3712 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
3713 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
3714 Pred == ICmpInst::ICMP_EQ))
3715 return V;
3716 }
3717
3718 // Check for other compares that behave like bit test.
3719 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
3720 TrueVal, FalseVal))
3721 return V;
3722
3723 // If we have an equality comparison, then we know the value in one of the
3724 // arms of the select. See if substituting this value into the arm and
3725 // simplifying the result yields the same value as the other arm.
3726 if (Pred == ICmpInst::ICMP_EQ) {
3727 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3728 TrueVal ||
3729 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3730 TrueVal)
3731 return FalseVal;
3732 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3733 FalseVal ||
3734 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3735 FalseVal)
3736 return FalseVal;
3737 } else if (Pred == ICmpInst::ICMP_NE) {
3738 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3739 FalseVal ||
3740 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3741 FalseVal)
3742 return TrueVal;
3743 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3744 TrueVal ||
3745 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3746 TrueVal)
3747 return TrueVal;
3748 }
3749
3750 return nullptr;
3751}
3752
3753/// Given operands for a SelectInst, see if we can fold the result.
3754/// If not, this returns null.
3755static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3756 const SimplifyQuery &Q, unsigned MaxRecurse) {
3757 if (auto *CondC = dyn_cast<Constant>(Cond)) {
3758 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
3759 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
3760 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
3761
3762 // select undef, X, Y -> X or Y
3763 if (isa<UndefValue>(CondC))
3764 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
3765
3766 // TODO: Vector constants with undef elements don't simplify.
3767
3768 // select true, X, Y -> X
3769 if (CondC->isAllOnesValue())
3770 return TrueVal;
3771 // select false, X, Y -> Y
3772 if (CondC->isNullValue())
3773 return FalseVal;
3774 }
3775
3776 // select ?, X, X -> X
3777 if (TrueVal == FalseVal)
3778 return TrueVal;
3779
3780 if (isa<UndefValue>(TrueVal)) // select ?, undef, X -> X
3781 return FalseVal;
3782 if (isa<UndefValue>(FalseVal)) // select ?, X, undef -> X
3783 return TrueVal;
3784
3785 if (Value *V =
3786 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
3787 return V;
3788
3789 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
3790 return V;
3791
3792 return nullptr;
3793}
3794
3795Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3796 const SimplifyQuery &Q) {
3797 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
3798}
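A small illustrative sketch (helper name and setup assumed) of the "select ?, X, X -> X" fold through the public entry point:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: both arms of the select are the same value, so the
// result is that value regardless of the (opaque) condition.
static Value *simplifySelectSameArmsExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I1 = Type::getInt1Ty(Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(I32, {I1, I32}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "sel", &M);

  Value *Cond = F->arg_begin();
  Value *X = F->arg_begin() + 1;

  SimplifyQuery Q(M.getDataLayout());
  return SimplifySelectInst(Cond, X, X, Q); // expect X
}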
3799
3800/// Given operands for an GetElementPtrInst, see if we can fold the result.
3801/// If not, this returns null.
3802static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3803 const SimplifyQuery &Q, unsigned) {
3804 // The address space of the GEP pointer operand.
3805 unsigned AS =
3806 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3807
3808 // getelementptr P -> P.
3809 if (Ops.size() == 1)
3810 return Ops[0];
3811
3812 // Compute the (pointer) type returned by the GEP instruction.
3813 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
3814 Type *GEPTy = PointerType::get(LastType, AS);
3815 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
3816 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3817 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
3818 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3819
3820 if (isa<UndefValue>(Ops[0]))
3821 return UndefValue::get(GEPTy);
3822
3823 if (Ops.size() == 2) {
3824 // getelementptr P, 0 -> P.
3825 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
3826 return Ops[0];
3827
3828 Type *Ty = SrcTy;
3829 if (Ty->isSized()) {
3830 Value *P;
3831 uint64_t C;
3832 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3833 // getelementptr P, N -> P if P points to a type of zero size.
3834 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
3835 return Ops[0];
3836
3837 // The following transforms are only safe if the ptrtoint cast
3838 // doesn't truncate the pointers.
3839 if (Ops[1]->getType()->getScalarSizeInBits() ==
3840 Q.DL.getIndexSizeInBits(AS)) {
3841 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
3842 if (match(P, m_Zero()))
3843 return Constant::getNullValue(GEPTy);
3844 Value *Temp;
3845 if (match(P, m_PtrToInt(m_Value(Temp))))
3846 if (Temp->getType() == GEPTy)
3847 return Temp;
3848 return nullptr;
3849 };
3850
3851 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
3852 if (TyAllocSize == 1 &&
3853 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
3854 if (Value *R = PtrToIntOrZero(P))
3855 return R;
3856
3857 // getelementptr V, (ashr (sub P, V), C) -> Q
3858 // if P points to a type of size 1 << C.
3859 if (match(Ops[1],
3860 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3861 m_ConstantInt(C))) &&
3862 TyAllocSize == 1ULL << C)
3863 if (Value *R = PtrToIntOrZero(P))
3864 return R;
3865
3866 // getelementptr V, (sdiv (sub P, V), C) -> Q
3867 // if P points to a type of size C.
3868 if (match(Ops[1],
3869 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3870 m_SpecificInt(TyAllocSize))))
3871 if (Value *R = PtrToIntOrZero(P))
3872 return R;
3873 }
3874 }
3875 }
3876
3877 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3878 all_of(Ops.slice(1).drop_back(1),
3879 [](Value *Idx) { return match(Idx, m_Zero()); })) {
3880 unsigned IdxWidth =
3881 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
3882 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
3883 APInt BasePtrOffset(IdxWidth, 0);
3884 Value *StrippedBasePtr =
3885 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
3886 BasePtrOffset);
3887
3888 // gep (gep V, C), (sub 0, V) -> C
3889 if (match(Ops.back(),
3890 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
3891 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
3892 return ConstantExpr::getIntToPtr(CI, GEPTy);
3893 }
3894 // gep (gep V, C), (xor V, -1) -> C-1
3895 if (match(Ops.back(),
3896 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
3897 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
3898 return ConstantExpr::getIntToPtr(CI, GEPTy);
3899 }
3900 }
3901 }
3902
3903 // Check to see if this is constant foldable.
3904 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
3905 return nullptr;
3906
3907 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
3908 Ops.slice(1));
3909 if (auto *CEFolded = ConstantFoldConstant(CE, Q.DL))
3910 return CEFolded;
3911 return CE;
3912}
3913
3914Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3915 const SimplifyQuery &Q) {
3916 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
3917}
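A minimal sketch, with an assumed helper and throwaway module, of the "getelementptr P, 0 -> P" fold:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: "getelementptr i8, i8* %p, i64 0" is the pointer
// itself, so the call should return the %p argument unchanged.
static Value *simplifyGEPZeroExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I8 = Type::getInt8Ty(Ctx);
  Type *PtrTy = Type::getInt8PtrTy(Ctx);
  auto *FTy = FunctionType::get(PtrTy, {PtrTy}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "gep0", &M);

  Value *P = F->arg_begin();
  Value *Ops[] = {P, ConstantInt::get(Type::getInt64Ty(Ctx), 0)};

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyGEPInst(I8, Ops, Q); // expect P
}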
3918
3919/// Given operands for an InsertValueInst, see if we can fold the result.
3920/// If not, this returns null.
3921static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
3922 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
3923 unsigned) {
3924 if (Constant *CAgg = dyn_cast<Constant>(Agg))
3925 if (Constant *CVal = dyn_cast<Constant>(Val))
3926 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
3927
3928 // insertvalue x, undef, n -> x
3929 if (match(Val, m_Undef()))
3930 return Agg;
3931
3932 // insertvalue x, (extractvalue y, n), n
3933 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
3934 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
3935 EV->getIndices() == Idxs) {
3936 // insertvalue undef, (extractvalue y, n), n -> y
3937 if (match(Agg, m_Undef()))
3938 return EV->getAggregateOperand();
3939
3940 // insertvalue y, (extractvalue y, n), n -> y
3941 if (Agg == EV->getAggregateOperand())
3942 return Agg;
3943 }
3944
3945 return nullptr;
3946}
3947
3948Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
3949 ArrayRef<unsigned> Idxs,
3950 const SimplifyQuery &Q) {
3951 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
3952}
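For illustration (setup assumed, not from this file), inserting undef into an aggregate folds back to the original aggregate:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: inserting undef into field 0 of an opaque {i32, i32}
// aggregate leaves the aggregate unchanged ("insertvalue x, undef, n -> x").
static Value *simplifyInsertUndefExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *AggTy = StructType::get(Ctx, {I32, I32});
  auto *FTy = FunctionType::get(AggTy, {AggTy}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "iv", &M);

  Value *Agg = F->arg_begin();
  unsigned Idxs[] = {0};

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyInsertValueInst(Agg, UndefValue::get(I32), Idxs, Q); // Agg
}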
3953
3954Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
3955 const SimplifyQuery &Q) {
3956 // Try to constant fold.
3957 auto *VecC = dyn_cast<Constant>(Vec);
3958 auto *ValC = dyn_cast<Constant>(Val);
3959 auto *IdxC = dyn_cast<Constant>(Idx);
3960 if (VecC && ValC && IdxC)
3961 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC);
3962
3963 // Fold into undef if index is out of bounds.
3964 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
3965 uint64_t NumElements = cast<VectorType>(Vec->getType())->getNumElements();
3966 if (CI->uge(NumElements))
3967 return UndefValue::get(Vec->getType());
3968 }
3969
3970 // If index is undef, it might be out of bounds (see above case)
3971 if (isa<UndefValue>(Idx))
3972 return UndefValue::get(Vec->getType());
3973
3974 return nullptr;
3975}
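A hedged sketch (assumed scaffolding) of the out-of-bounds insertelement fold described above:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: index 7 is out of bounds for a <4 x i32> vector, so
// the whole insertelement folds to undef of the vector type.
static Value *simplifyInsertOutOfRangeExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *VecTy = VectorType::get(I32, 4);
  auto *FTy = FunctionType::get(VecTy, {VecTy, I32}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "ie", &M);

  Value *Vec = F->arg_begin();
  Value *Val = F->arg_begin() + 1;
  Value *Idx = ConstantInt::get(I32, 7);

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyInsertElementInst(Vec, Val, Idx, Q); // expect undef
}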
3976
3977/// Given operands for an ExtractValueInst, see if we can fold the result.
3978/// If not, this returns null.
3979static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3980 const SimplifyQuery &, unsigned) {
3981 if (auto *CAgg = dyn_cast<Constant>(Agg))
3982 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
3983
3984 // extractvalue x, (insertvalue y, elt, n), n -> elt
3985 unsigned NumIdxs = Idxs.size();
3986 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
3987 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
3988 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
3989 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
3990 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
3991 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
3992 Idxs.slice(0, NumCommonIdxs)) {
3993 if (NumIdxs == NumInsertValueIdxs)
3994 return IVI->getInsertedValueOperand();
3995 break;
3996 }
3997 }
3998
3999 return nullptr;
4000}
4001
4002Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4003 const SimplifyQuery &Q) {
4004 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
4005}
4006
4007/// Given operands for an ExtractElementInst, see if we can fold the result.
4008/// If not, this returns null.
4009static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
4010 unsigned) {
4011 if (auto *CVec = dyn_cast<Constant>(Vec)) {
4012 if (auto *CIdx = dyn_cast<Constant>(Idx))
4013 return ConstantFoldExtractElementInstruction(CVec, CIdx);
4014
4015 // The index is not relevant if our vector is a splat.
4016 if (auto *Splat = CVec->getSplatValue())
4017 return Splat;
4018
4019 if (isa<UndefValue>(Vec))
4020 return UndefValue::get(Vec->getType()->getVectorElementType());
4021 }
4022
4023 // If extracting a specified index from the vector, see if we can recursively
4024 // find a previously computed scalar that was inserted into the vector.
4025 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
4026 if (IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
4027 // definitely out of bounds, thus undefined result
4028 return UndefValue::get(Vec->getType()->getVectorElementType());
4029 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4030 return Elt;
4031 }
4032
4033 // An undef extract index can be arbitrarily chosen to be an out-of-range
4034 // index value, which would result in the instruction being undef.
4035 if (isa<UndefValue>(Idx))
4036 return UndefValue::get(Vec->getType()->getVectorElementType());
4037
4038 return nullptr;
4039}
4040
4041Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4042 const SimplifyQuery &Q) {
4043 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4044}
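A small constant-only sketch (helper name assumed): with a constant splat and a constant index this simply constant-folds to the splatted element, and the getSplatValue() rule above additionally covers non-constant indices:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

// Hypothetical helper: extracting any lane of a splat of 42 yields i32 42.
static Value *simplifyExtractSplatExample(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *Splat = ConstantVector::getSplat(4, ConstantInt::get(I32, 42));
  Constant *Idx = ConstantInt::get(I32, 3);

  DataLayout DL(""); // default (empty) data layout is enough here
  SimplifyQuery Q(DL);
  return SimplifyExtractElementInst(Splat, Idx, Q); // expect i32 42
}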
4045
4046/// See if we can fold the given phi. If not, returns null.
4047static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
4048 // If all of the PHI's incoming values are the same then replace the PHI node
4049 // with the common value.
4050 Value *CommonValue = nullptr;
4051 bool HasUndefInput = false;
4052 for (Value *Incoming : PN->incoming_values()) {
4053 // If the incoming value is the phi node itself, it can safely be skipped.
4054 if (Incoming == PN) continue;
4055 if (isa<UndefValue>(Incoming)) {
4056 // Remember that we saw an undef value, but otherwise ignore them.
4057 HasUndefInput = true;
4058 continue;
4059 }
4060 if (CommonValue && Incoming != CommonValue)
4061 return nullptr; // Not the same, bail out.
4062 CommonValue = Incoming;
4063 }
4064
4065 // If CommonValue is null then all of the incoming values were either undef or
4066 // equal to the phi node itself.
4067 if (!CommonValue)
4068 return UndefValue::get(PN->getType());
4069
4070 // If we have a PHI node like phi(X, undef, X), where X is defined by some
4071 // instruction, we cannot return X as the result of the PHI node unless it
4072 // dominates the PHI block.
4073 if (HasUndefInput)
4074 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4075
4076 return CommonValue;
4077}
4078
4079static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4080 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4081 if (auto *C = dyn_cast<Constant>(Op))
4082 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4083
4084 if (auto *CI = dyn_cast<CastInst>(Op)) {
4085 auto *Src = CI->getOperand(0);
4086 Type *SrcTy = Src->getType();
4087 Type *MidTy = CI->getType();
4088 Type *DstTy = Ty;
4089 if (Src->getType() == Ty) {
4090 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4091 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4092 Type *SrcIntPtrTy =
4093 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4094 Type *MidIntPtrTy =
4095 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4096 Type *DstIntPtrTy =
4097 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4098 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4099 SrcIntPtrTy, MidIntPtrTy,
4100 DstIntPtrTy) == Instruction::BitCast)
4101 return Src;
4102 }
4103 }
4104
4105 // bitcast x -> x
4106 if (CastOpc == Instruction::BitCast)
4107 if (Op->getType() == Ty)
4108 return Op;
4109
4110 return nullptr;
4111}
4112
4113Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4114 const SimplifyQuery &Q) {
4115 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4116}
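An illustrative sketch (setup assumed) of the "bitcast x -> x" rule:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: a bitcast of a value to its own type is a no-op, so
// the original value comes straight back.
static Value *simplifyNoopBitCastExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *PtrTy = Type::getInt8PtrTy(Ctx);
  auto *FTy = FunctionType::get(PtrTy, {PtrTy}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "bc", &M);

  Value *X = F->arg_begin();

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyCastInst(Instruction::BitCast, X, PtrTy, Q); // expect X
}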
4117
4118/// For the given destination element of a shuffle, peek through shuffles to
4119/// match a root vector source operand that contains that element in the same
4120/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
4121static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4122 int MaskVal, Value *RootVec,
4123 unsigned MaxRecurse) {
4124 if (!MaxRecurse--)
4125 return nullptr;
4126
4127 // Bail out if any mask value is undefined. That kind of shuffle may be
4128 // simplified further based on demanded bits or other folds.
4129 if (MaskVal == -1)
4130 return nullptr;
4131
4132 // The mask value chooses which source operand we need to look at next.
4133 int InVecNumElts = Op0->getType()->getVectorNumElements();
4134 int RootElt = MaskVal;
4135 Value *SourceOp = Op0;
4136 if (MaskVal >= InVecNumElts) {
4137 RootElt = MaskVal - InVecNumElts;
4138 SourceOp = Op1;
4139 }
4140
4141 // If the source operand is a shuffle itself, look through it to find the
4142 // matching root vector.
4143 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4144 return foldIdentityShuffles(
4145 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4146 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4147 }
4148
4149 // TODO: Look through bitcasts? What if the bitcast changes the vector element
4150 // size?
4151
4152 // The source operand is not a shuffle. Initialize the root vector value for
4153 // this shuffle if that has not been done yet.
4154 if (!RootVec)
4155 RootVec = SourceOp;
4156
4157 // Give up as soon as a source operand does not match the existing root value.
4158 if (RootVec != SourceOp)
4159 return nullptr;
4160
4161 // The element must be coming from the same lane in the source vector
4162 // (although it may have crossed lanes in intermediate shuffles).
4163 if (RootElt != DestElt)
4164 return nullptr;
4165
4166 return RootVec;
4167}
4168
4169static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4170 Type *RetTy, const SimplifyQuery &Q,
4171 unsigned MaxRecurse) {
4172 if (isa<UndefValue>(Mask))
4173 return UndefValue::get(RetTy);
4174
4175 Type *InVecTy = Op0->getType();
4176 unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
4177 unsigned InVecNumElts = InVecTy->getVectorNumElements();
4178
4179 SmallVector<int, 32> Indices;
4180 ShuffleVectorInst::getShuffleMask(Mask, Indices);
4181 assert(MaskNumElts == Indices.size() &&
4182 "Size of Indices not same as number of mask elements?");
4183
4184 // Canonicalization: If mask does not select elements from an input vector,
4185 // replace that input vector with undef.
4186 bool MaskSelects0 = false, MaskSelects1 = false;
4187 for (unsigned i = 0; i != MaskNumElts; ++i) {
4188 if (Indices[i] == -1)
4189 continue;
4190 if ((unsigned)Indices[i] < InVecNumElts)
4191 MaskSelects0 = true;
4192 else
4193 MaskSelects1 = true;
4194 }
4195 if (!MaskSelects0)
4196 Op0 = UndefValue::get(InVecTy);
4197 if (!MaskSelects1)
4198 Op1 = UndefValue::get(InVecTy);
4199
4200 auto *Op0Const = dyn_cast<Constant>(Op0);
4201 auto *Op1Const = dyn_cast<Constant>(Op1);
4202
4203 // If all operands are constant, constant fold the shuffle.
4204 if (Op0Const && Op1Const)
4205 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
4206
4207 // Canonicalization: if only one input vector is constant, it shall be the
4208 // second one.
4209 if (Op0Const && !Op1Const) {
4210 std::swap(Op0, Op1);
4211 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts);
4212 }
4213
4214 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4215 // value type is the same as the input vectors' type.
4216 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4217 if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
4218 OpShuf->getMask()->getSplatValue())
4219 return Op0;
4220
4221 // Don't fold a shuffle with undef mask elements. This may get folded in a
4222 // better way using demanded bits or other analysis.
4223 // TODO: Should we allow this?
4224 if (find(Indices, -1) != Indices.end())
4225 return nullptr;
4226
4227 // Check if every element of this shuffle can be mapped back to the
4228 // corresponding element of a single root vector. If so, we don't need this
4229 // shuffle. This handles simple identity shuffles as well as chains of
4230 // shuffles that may widen/narrow and/or move elements across lanes and back.
4231 Value *RootVec = nullptr;
4232 for (unsigned i = 0; i != MaskNumElts; ++i) {
4233 // Note that recursion is limited for each vector element, so if any element
4234 // exceeds the limit, this will fail to simplify.
4235 RootVec =
4236 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4237
4238 // We can't replace a widening/narrowing shuffle with one of its operands.
4239 if (!RootVec || RootVec->getType() != RetTy)
4240 return nullptr;
4241 }
4242 return RootVec;
4243}
4244
4245/// Given operands for a ShuffleVectorInst, fold the result or return null.
4246Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4247 Type *RetTy, const SimplifyQuery &Q) {
4248 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4249}
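A sketch under the same assumptions as the earlier examples (throwaway module, default data layout) showing an identity shuffle folding away:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper: the identity mask <0, 1, 2, 3> maps every lane back to
// the first operand, so the shuffle folds away and %v is returned directly.
static Value *simplifyIdentityShuffleExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *VecTy = VectorType::get(I32, 4);
  auto *FTy = FunctionType::get(VecTy, {VecTy}, /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "shuf", &M);

  Value *Op0 = F->arg_begin();          // %v
  Value *Op1 = UndefValue::get(VecTy);  // unused second input
  uint32_t MaskVals[] = {0, 1, 2, 3};
  Constant *Mask = ConstantDataVector::get(Ctx, MaskVals);

  SimplifyQuery Q(M.getDataLayout());
  return SimplifyShuffleVectorInst(Op0, Op1, Mask, VecTy, Q); // expect %v
}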
4250
4251static Constant *propagateNaN(Constant *In) {
4252 // If the input is a vector with undef elements, just return a default NaN.
4253 if (!In->isNaN())
4254 return ConstantFP::getNaN(In->getType());
4255
4256 // Propagate the existing NaN constant when possible.
4257 // TODO: Should we quiet a signaling NaN?
4258 return In;
4259}
4260
4261static Constant *simplifyFPBinop(Value *Op0, Value *Op1) {
4262 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1))
4263 return ConstantFP::getNaN(Op0->getType());
4264
4265 if (match(Op0, m_NaN()))
4266 return propagateNaN(cast<Constant>(Op0));
4267 if (match(Op1, m_NaN()))
4268 return propagateNaN(cast<Constant>(Op1));
4269
4270 return nullptr;
4271}
4272
4273/// Given operands for an FAdd, see if we can fold the result. If not, this
4274/// returns null.
4275static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4276 const SimplifyQuery &Q, unsigned MaxRecurse) {
4277 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
4278 return C;
4279
4280 if (Constant *C = simplifyFPBinop(Op0, Op1))
4281 return C;
4282
4283 // fadd X, -0 ==> X
4284 if (match(Op1, m_NegZeroFP()))
4285 return Op0;
4286
4287 // fadd X, 0 ==> X, when we know X is not -0
4288 if (match(Op1, m_PosZeroFP()) &&
4289 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4290 return Op0;
4291
4292 // With nnan: (+/-0.0 - X) + X --> 0.0 (and commuted variant)
4293 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
4294 // Negative zeros are allowed because we always end up with positive zero:
4295 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4296 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4297 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
4298 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
4299 if (FMF.noNaNs() && (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
4300 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))))
4301 return ConstantFP::getNullValue(Op0->getType());
4302
4303 return nullptr;
4304}
4305
4306/// Given operands for an FSub, see if we can fold the result. If not, this
4307/// returns null.
4308static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4309 const SimplifyQuery &Q, unsigned MaxRecurse) {
4310 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
4311 return C;
4312
4313 if (Constant *C = simplifyFPBinop(Op0, Op1))
4314 return C;
4315
4316 // fsub X, +0 ==> X
4317 if (match(Op1, m_PosZeroFP()))
4318 return Op0;
4319
4320 // fsub X, -0 ==> X, when we know X is not -0
4321 if (match(Op1, m_NegZeroFP()) &&
4322 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4323 return Op0;
4324
4325 // fsub -0.0, (fsub -0.0, X) ==> X
4326 Value *X;
4327 if (match(Op0, m_NegZeroFP()) &&
4328 match(Op1, m_FSub(m_NegZeroFP(), m_Value(X))))
4329 return X;
4330
4331 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
4332 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
4333 match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))))
4334 return X;
4335
4336 // fsub nnan x, x ==> 0.0
4337 if (FMF.noNaNs() && Op0 == Op1)
4338 return Constant::getNullValue(Op0->getType());
4339
4340 return nullptr;
4341}
4342
4343/// Given the operands for an FMul, see if we can fold the result
4344static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4345 const SimplifyQuery &Q, unsigned MaxRecurse) {
4346 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
4347 return C;
4348
4349 if (Constant *C = simplifyFPBinop(Op0, Op1))
4350 return C;
4351
4352 // fmul X, 1.0 ==> X
4353 if (match(Op1, m_FPOne()))
4354 return Op0;
4355
4356 // fmul nnan nsz X, 0 ==> 0
4357 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
4358 return ConstantFP::getNullValue(Op0->getType());
4359
4360 // sqrt(X) * sqrt(X) --> X, if we can:
4361 // 1. Remove the intermediate rounding (reassociate).
4362 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
4363 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
4364 Value *X;
4365 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
4366 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
4367 return X;
4368
4369 return nullptr;
4370}
4371
4372Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4373 const SimplifyQuery &Q) {
4374 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
4375}
4376
4377
4378Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4379 const SimplifyQuery &Q) {
4380 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
4381}
4382
4383Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4384 const SimplifyQuery &Q) {
4385 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
4386}
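
The fast-math folds above can be exercised directly through these exported entry points. Below is a minimal, hypothetical standalone driver (not part of this file; it assumes LLVM 7-era headers and the signatures shown above, and the module/function names are made up) showing the "fadd X, -0.0 ==> X" case:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);

  // A dummy external function provides a non-constant float value %x.
  Type *FltTy = Type::getFloatTy(Ctx);
  FunctionType *FTy = FunctionType::get(FltTy, {FltTy}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  Value *X = &*F->arg_begin();

  // fadd %x, -0.0 simplifies to %x even without any fast-math flags.
  SimplifyQuery Q(M.getDataLayout());
  Value *V = SimplifyFAddInst(X, ConstantFP::get(FltTy, -0.0), FastMathFlags(), Q);

  // V == X here; a null return would mean "no simplification found".
  return V == X ? 0 : 1;
}

Note that no instruction has to be created first: these entry points take raw operands, which is what lets callers query a fold before materializing any IR.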
4387
4388static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4389 const SimplifyQuery &Q, unsigned) {
4390 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
4391 return C;
4392
4393 if (Constant *C = simplifyFPBinop(Op0, Op1))
4394 return C;
4395
4396 // X / 1.0 -> X
4397 if (match(Op1, m_FPOne()))
4398 return Op0;
4399
4400 // 0 / X -> 0
4401 // Requires that NaNs are off (X could be zero) and signed zeroes are
4402 // ignored (X could be positive or negative, so the output sign is unknown).
4403 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4404 return ConstantFP::getNullValue(Op0->getType());
4405
4406 if (FMF.noNaNs()) {
4407 // X / X -> 1.0 is legal when NaNs are ignored.
4408 // We can ignore infinities because INF/INF is NaN.
4409 if (Op0 == Op1)
4410 return ConstantFP::get(Op0->getType(), 1.0);
4411
4412 // (X * Y) / Y --> X if we can reassociate to the above form.
4413 Value *X;
4414 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
4415 return X;
4416
4417 // -X / X -> -1.0 and
4418 // X / -X -> -1.0 are legal when NaNs are ignored.
4419 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
4420 if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
4421 BinaryOperator::getFNegArgument(Op0) == Op1) ||
4422 (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
4423 BinaryOperator::getFNegArgument(Op1) == Op0))
4424 return ConstantFP::get(Op0->getType(), -1.0);
4425 }
4426
4427 return nullptr;
4428}
4429
4430Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4431 const SimplifyQuery &Q) {
4432 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
4433}
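
As the comments above note, the X/X fold is gated on nnan. A small hypothetical helper (same LLVM 7-era API assumptions as the sketch above) makes the flag requirement explicit:

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// For a non-constant X, returns ConstantFP 1.0 of X's type when NaNs may be
// ignored, and nullptr otherwise.
Value *trySelfDivFold(Value *X, const SimplifyQuery &Q, bool CanIgnoreNaNs) {
  FastMathFlags FMF;
  if (CanIgnoreNaNs)
    FMF.setNoNaNs(); // without this, X could be NaN or INF and X/X would be NaN
  return SimplifyFDivInst(X, X, FMF, Q);
}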
4434
4435static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4436 const SimplifyQuery &Q, unsigned) {
4437 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
4438 return C;
4439
4440 if (Constant *C = simplifyFPBinop(Op0, Op1))
4441 return C;
4442
4443 // Unlike fdiv, the result of frem always matches the sign of the dividend.
4444 // The constant match may include undef elements in a vector, so return a full
4445 // zero constant as the result.
4446 if (FMF.noNaNs()) {
4447 // +0 % X -> 0
4448 if (match(Op0, m_PosZeroFP()))
4449 return ConstantFP::getNullValue(Op0->getType());
4450 // -0 % X -> -0
4451 if (match(Op0, m_NegZeroFP()))
4452 return ConstantFP::getNegativeZero(Op0->getType());
4453 }
4454
4455 return nullptr;
4456}
4457
4458Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4459 const SimplifyQuery &Q) {
4460 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
4461}
4462
4463//=== Helper functions for higher up the class hierarchy.
4464
4465/// Given operands for a BinaryOperator, see if we can fold the result.
4466/// If not, this returns null.
4467static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4468 const SimplifyQuery &Q, unsigned MaxRecurse) {
4469 switch (Opcode) {
4470 case Instruction::Add:
4471 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
4472 case Instruction::Sub:
4473 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
4474 case Instruction::Mul:
4475 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
4476 case Instruction::SDiv:
4477 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
4478 case Instruction::UDiv:
4479 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
4480 case Instruction::SRem:
4481 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
4482 case Instruction::URem:
4483 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
4484 case Instruction::Shl:
4485 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
4486 case Instruction::LShr:
4487 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
4488 case Instruction::AShr:
4489 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
4490 case Instruction::And:
4491 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
4492 case Instruction::Or:
4493 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
4494 case Instruction::Xor:
4495 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
4496 case Instruction::FAdd:
4497 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4498 case Instruction::FSub:
4499 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4500 case Instruction::FMul:
4501 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4502 case Instruction::FDiv:
4503 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4504 case Instruction::FRem:
4505 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4506 default:
4507 llvm_unreachable("Unexpected opcode");
4508 }
4509}
4510
4511/// Given operands for a BinaryOperator, see if we can fold the result.
4512/// If not, this returns null.
4513/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
4514/// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
4515static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4516 const FastMathFlags &FMF, const SimplifyQuery &Q,
4517 unsigned MaxRecurse) {
4518 switch (Opcode) {
4519 case Instruction::FAdd:
4520 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
4521 case Instruction::FSub:
4522 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
4523 case Instruction::FMul:
4524 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
4525 case Instruction::FDiv:
4526 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
4527 default:
4528 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
4529 }
4530}
4531
4532Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4533 const SimplifyQuery &Q) {
4534 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
4535}
4536
4537Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4538 FastMathFlags FMF, const SimplifyQuery &Q) {
4539 return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
4540}
4541
4542/// Given operands for a CmpInst, see if we can fold the result.
4543static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4544 const SimplifyQuery &Q, unsigned MaxRecurse) {
4545 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
2
Taking false branch
4546 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
4547 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
3
Calling 'SimplifyFCmpInst'
4548}
4549
4550Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4551 const SimplifyQuery &Q) {
4552 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
1
Calling 'SimplifyCmpInst'
4553}
4554
4555static bool IsIdempotent(Intrinsic::ID ID) {
4556 switch (ID) {
4557 default: return false;
4558
4559 // Unary idempotent: f(f(x)) = f(x)
4560 case Intrinsic::fabs:
4561 case Intrinsic::floor:
4562 case Intrinsic::ceil:
4563 case Intrinsic::trunc:
4564 case Intrinsic::rint:
4565 case Intrinsic::nearbyint:
4566 case Intrinsic::round:
4567 case Intrinsic::canonicalize:
4568 return true;
4569 }
4570}
4571
4572static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
4573 const DataLayout &DL) {
4574 GlobalValue *PtrSym;
4575 APInt PtrOffset;
4576 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
4577 return nullptr;
4578
4579 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
4580 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
4581 Type *Int32PtrTy = Int32Ty->getPointerTo();
4582 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
4583
4584 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
4585 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
4586 return nullptr;
4587
4588 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
4589 if (OffsetInt % 4 != 0)
4590 return nullptr;
4591
4592 Constant *C = ConstantExpr::getGetElementPtr(
4593 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
4594 ConstantInt::get(Int64Ty, OffsetInt / 4));
4595 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
4596 if (!Loaded)
4597 return nullptr;
4598
4599 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
4600 if (!LoadedCE)
4601 return nullptr;
4602
4603 if (LoadedCE->getOpcode() == Instruction::Trunc) {
4604 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4605 if (!LoadedCE)
4606 return nullptr;
4607 }
4608
4609 if (LoadedCE->getOpcode() != Instruction::Sub)
4610 return nullptr;
4611
4612 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
4613 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
4614 return nullptr;
4615 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
4616
4617 Constant *LoadedRHS = LoadedCE->getOperand(1);
4618 GlobalValue *LoadedRHSSym;
4619 APInt LoadedRHSOffset;
4620 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
4621 DL) ||
4622 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
4623 return nullptr;
4624
4625 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
4626}
4627
4628static bool maskIsAllZeroOrUndef(Value *Mask) {
4629 auto *ConstMask = dyn_cast<Constant>(Mask);
4630 if (!ConstMask)
4631 return false;
4632 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
4633 return true;
4634 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
4635 ++I) {
4636 if (auto *MaskElt = ConstMask->getAggregateElement(I))
4637 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
4638 continue;
4639 return false;
4640 }
4641 return true;
4642}
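
The walk above accepts any mix of zero and undef lanes and rejects a mask as soon as one lane is known to be 1. A behavior-equivalent sketch over a toy lane representation (plain C++, deliberately not using the LLVM constant classes) spells out which masks qualify:

#include <vector>

// Toy stand-in for a constant <N x i1> mask: each lane is 0, 1, or undef.
enum class Lane { Zero, One, Undef };

// Mirrors maskIsAllZeroOrUndef: true iff no lane is definitely 1.
static bool allZeroOrUndef(const std::vector<Lane> &Mask) {
  for (Lane L : Mask)
    if (L == Lane::One)
      return false;
  return true;
}

// allZeroOrUndef({Lane::Zero, Lane::Undef, Lane::Zero}) -> true
//   (an llvm.masked.load with such a mask returns its passthru operand,
//    as handled further down in SimplifyIntrinsic)
// allZeroOrUndef({Lane::Zero, Lane::One}) -> false (no simplification)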
4643
4644template <typename IterTy>
4645static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
4646 const SimplifyQuery &Q, unsigned MaxRecurse) {
4647 Intrinsic::ID IID = F->getIntrinsicID();
4648 unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
4649
4650 // Unary Ops
4651 if (NumOperands == 1) {
4652 // Perform idempotent optimizations
4653 if (IsIdempotent(IID)) {
4654 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) {
4655 if (II->getIntrinsicID() == IID)
4656 return II;
4657 }
4658 }
4659
4660 Value *IIOperand = *ArgBegin;
4661 Value *X;
4662 switch (IID) {
4663 case Intrinsic::fabs: {
4664 if (SignBitMustBeZero(IIOperand, Q.TLI))
4665 return IIOperand;
4666 return nullptr;
4667 }
4668 case Intrinsic::bswap: {
4669 // bswap(bswap(x)) -> x
4670 if (match(IIOperand, m_BSwap(m_Value(X))))
4671 return X;
4672 return nullptr;
4673 }
4674 case Intrinsic::bitreverse: {
4675 // bitreverse(bitreverse(x)) -> x
4676 if (match(IIOperand, m_BitReverse(m_Value(X))))
4677 return X;
4678 return nullptr;
4679 }
4680 case Intrinsic::exp: {
4681 // exp(log(x)) -> x
4682 if (Q.CxtI->hasAllowReassoc() &&
4683 match(IIOperand, m_Intrinsic<Intrinsic::log>(m_Value(X))))
4684 return X;
4685 return nullptr;
4686 }
4687 case Intrinsic::exp2: {
4688 // exp2(log2(x)) -> x
4689 if (Q.CxtI->hasAllowReassoc() &&
4690 match(IIOperand, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
4691 return X;
4692 return nullptr;
4693 }
4694 case Intrinsic::log: {
4695 // log(exp(x)) -> x
4696 if (Q.CxtI->hasAllowReassoc() &&
4697 match(IIOperand, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
4698 return X;
4699 return nullptr;
4700 }
4701 case Intrinsic::log2: {
4702 // log2(exp2(x)) -> x
4703 if (Q.CxtI->hasAllowReassoc() &&
4704 match(IIOperand, m_Intrinsic<Intrinsic::exp2>(m_Value(X)))) {
4705 return X;
4706 }
4707 return nullptr;
4708 }
4709 default:
4710 return nullptr;
4711 }
4712 }
4713
4714 // Binary Ops
4715 if (NumOperands == 2) {
4716 Value *LHS = *ArgBegin;
4717 Value *RHS = *(ArgBegin + 1);
4718 Type *ReturnType = F->getReturnType();
4719
4720 switch (IID) {
4721 case Intrinsic::usub_with_overflow:
4722 case Intrinsic::ssub_with_overflow: {
4723 // X - X -> { 0, false }
4724 if (LHS == RHS)
4725 return Constant::getNullValue(ReturnType);
4726
4727 // X - undef -> undef
4728 // undef - X -> undef
4729 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4730 return UndefValue::get(ReturnType);
4731
4732 return nullptr;
4733 }
4734 case Intrinsic::uadd_with_overflow:
4735 case Intrinsic::sadd_with_overflow: {
4736 // X + undef -> undef
4737 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4738 return UndefValue::get(ReturnType);
4739
4740 return nullptr;
4741 }
4742 case Intrinsic::umul_with_overflow:
4743 case Intrinsic::smul_with_overflow: {
4744 // 0 * X -> { 0, false }
4745 // X * 0 -> { 0, false }
4746 if (match(LHS, m_Zero()) || match(RHS, m_Zero()))
4747 return Constant::getNullValue(ReturnType);
4748
4749 // undef * X -> { 0, false }
4750 // X * undef -> { 0, false }
4751 if (match(LHS, m_Undef()) || match(RHS, m_Undef()))
4752 return Constant::getNullValue(ReturnType);
4753
4754 return nullptr;
4755 }
4756 case Intrinsic::load_relative: {
4757 Constant *C0 = dyn_cast<Constant>(LHS);
4758 Constant *C1 = dyn_cast<Constant>(RHS);
4759 if (C0 && C1)
4760 return SimplifyRelativeLoad(C0, C1, Q.DL);
4761 return nullptr;
4762 }
4763 case Intrinsic::powi:
4764 if (ConstantInt *Power = dyn_cast<ConstantInt>(RHS)) {
4765 // powi(x, 0) -> 1.0
4766 if (Power->isZero())
4767 return ConstantFP::get(LHS->getType(), 1.0);
4768 // powi(x, 1) -> x
4769 if (Power->isOne())
4770 return LHS;
4771 }
4772 return nullptr;
4773 case Intrinsic::maxnum:
4774 case Intrinsic::minnum:
4775 // If one argument is NaN, return the other argument.
4776 if (match(LHS, m_NaN()))
4777 return RHS;
4778 if (match(RHS, m_NaN()))
4779 return LHS;
4780 return nullptr;
4781 default:
4782 return nullptr;
4783 }
4784 }
4785
4786 // Simplify calls to llvm.masked.load.*
4787 switch (IID) {
4788 case Intrinsic::masked_load: {
4789 Value *MaskArg = ArgBegin[2];
4790 Value *PassthruArg = ArgBegin[3];
4791 // If the mask is all zeros or undef, the "passthru" argument is the result.
4792 if (maskIsAllZeroOrUndef(MaskArg))
4793 return PassthruArg;
4794 return nullptr;
4795 }
4796 default:
4797 return nullptr;
4798 }
4799}
4800
4801template <typename IterTy>
4802static Value *SimplifyCall(ImmutableCallSite CS, Value *V, IterTy ArgBegin,
4803 IterTy ArgEnd, const SimplifyQuery &Q,
4804 unsigned MaxRecurse) {
4805 Type *Ty = V->getType();
4806 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
4807 Ty = PTy->getElementType();
4808 FunctionType *FTy = cast<FunctionType>(Ty);
4809
4810 // call undef -> undef
4811 // call null -> undef
4812 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4813 return UndefValue::get(FTy->getReturnType());
4814
4815 Function *F = dyn_cast<Function>(V);
4816 if (!F)
4817 return nullptr;
4818
4819 if (F->isIntrinsic())
4820 if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
4821 return Ret;
4822
4823 if (!canConstantFoldCallTo(CS, F))
4824 return nullptr;
4825
4826 SmallVector<Constant *, 4> ConstantArgs;
4827 ConstantArgs.reserve(ArgEnd - ArgBegin);
4828 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
4829 Constant *C = dyn_cast<Constant>(*I);
4830 if (!C)
4831 return nullptr;
4832 ConstantArgs.push_back(C);
4833 }
4834
4835 return ConstantFoldCall(CS, F, ConstantArgs, Q.TLI);
4836}
4837
4838Value *llvm::SimplifyCall(ImmutableCallSite CS, Value *V,
4839 User::op_iterator ArgBegin, User::op_iterator ArgEnd,
4840 const SimplifyQuery &Q) {
4841 return ::SimplifyCall(CS, V, ArgBegin, ArgEnd, Q, RecursionLimit);
4842}
4843
4844Value *llvm::SimplifyCall(ImmutableCallSite CS, Value *V,
4845 ArrayRef<Value *> Args, const SimplifyQuery &Q) {
4846 return ::SimplifyCall(CS, V, Args.begin(), Args.end(), Q, RecursionLimit);
4847}
4848
4849Value *llvm::SimplifyCall(ImmutableCallSite ICS, const SimplifyQuery &Q) {
4850 CallSite CS(const_cast<Instruction*>(ICS.getInstruction()));
4851 return ::SimplifyCall(CS, CS.getCalledValue(), CS.arg_begin(), CS.arg_end(),
4852 Q, RecursionLimit);
4853}
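
These SimplifyCall overloads reach the intrinsic handling shown earlier. A hypothetical driver (same LLVM 7-era API assumptions and made-up names as the earlier sketches) folds powi(%x, 1) back to %x:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  Type *FltTy = Type::getFloatTy(Ctx);
  FunctionType *FTy = FunctionType::get(FltTy, {FltTy}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  Value *X = &*F->arg_begin();

  // Build a real call: %p = call float @llvm.powi.f32(float %x, i32 1)
  Function *Powi = Intrinsic::getDeclaration(&M, Intrinsic::powi, {FltTy});
  CallInst *CI = B.CreateCall(Powi, {X, B.getInt32(1)});

  // SimplifyIntrinsic sees powi with a constant power of one and returns %x.
  SimplifyQuery Q(M.getDataLayout(), CI);
  Value *V = SimplifyCall(ImmutableCallSite(CI), Q);
  return V == X ? 0 : 1;
}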
4854
4855/// See if we can compute a simplified version of this instruction.
4856/// If not, this returns null.
4857
4858Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
4859 OptimizationRemarkEmitter *ORE) {
4860 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
4861 Value *Result;
4862
4863 switch (I->getOpcode()) {
4864 default:
4865 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
4866 break;
4867 case Instruction::FAdd:
4868 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
4869 I->getFastMathFlags(), Q);
4870 break;
4871 case Instruction::Add:
4872 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
4873 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4874 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4875 break;
4876 case Instruction::FSub:
4877 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
4878 I->getFastMathFlags(), Q);
4879 break;
4880 case Instruction::Sub:
4881 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
4882 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4883 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4884 break;
4885 case Instruction::FMul:
4886 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
4887 I->getFastMathFlags(), Q);
4888 break;
4889 case Instruction::Mul:
4890 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
4891 break;
4892 case Instruction::SDiv:
4893 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
4894 break;
4895 case Instruction::UDiv:
4896 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
4897 break;
4898 case Instruction::FDiv:
4899 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
4900 I->getFastMathFlags(), Q);
4901 break;
4902 case Instruction::SRem:
4903 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
4904 break;
4905 case Instruction::URem:
4906 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
4907 break;
4908 case Instruction::FRem:
4909 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
4910 I->getFastMathFlags(), Q);
4911 break;
4912 case Instruction::Shl:
4913 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
4914 cast<BinaryOperator>(I)->hasNoSignedWrap(),
4915 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
4916 break;
4917 case Instruction::LShr:
4918 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
4919 cast<BinaryOperator>(I)->isExact(), Q);
4920 break;
4921 case Instruction::AShr:
4922 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
4923 cast<BinaryOperator>(I)->isExact(), Q);
4924 break;
4925 case Instruction::And:
4926 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
4927 break;
4928 case Instruction::Or:
4929 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
4930 break;
4931 case Instruction::Xor:
4932 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
4933 break;
4934 case Instruction::ICmp:
4935 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
4936 I->getOperand(0), I->getOperand(1), Q);
4937 break;
4938 case Instruction::FCmp:
4939 Result =
4940 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
4941 I->getOperand(1), I->getFastMathFlags(), Q);
4942 break;
4943 case Instruction::Select:
4944 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
4945 I->getOperand(2), Q);
4946 break;
4947 case Instruction::GetElementPtr: {
4948 SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
4949 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
4950 Ops, Q);
4951 break;
4952 }
4953 case Instruction::InsertValue: {
4954 InsertValueInst *IV = cast<InsertValueInst>(I);
4955 Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
4956 IV->getInsertedValueOperand(),
4957 IV->getIndices(), Q);
4958 break;
4959 }
4960 case Instruction::InsertElement: {
4961 auto *IE = cast<InsertElementInst>(I);
4962 Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
4963 IE->getOperand(2), Q);
4964 break;
4965 }
4966 case Instruction::ExtractValue: {
4967 auto *EVI = cast<ExtractValueInst>(I);
4968 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
4969 EVI->getIndices(), Q);
4970 break;
4971 }
4972 case Instruction::ExtractElement: {
4973 auto *EEI = cast<ExtractElementInst>(I);
4974 Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
4975 EEI->getIndexOperand(), Q);
4976 break;
4977 }
4978 case Instruction::ShuffleVector: {
4979 auto *SVI = cast<ShuffleVectorInst>(I);
4980 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
4981 SVI->getMask(), SVI->getType(), Q);
4982 break;
4983 }
4984 case Instruction::PHI:
4985 Result = SimplifyPHINode(cast<PHINode>(I), Q);
4986 break;
4987 case Instruction::Call: {
4988 CallSite CS(cast<CallInst>(I));
4989 Result = SimplifyCall(CS, Q);
4990 break;
4991 }
4992#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4993#include "llvm/IR/Instruction.def"
4994#undef HANDLE_CAST_INST
4995 Result =
4996 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
4997 break;
4998 case Instruction::Alloca:
4999 // No simplifications for Alloca and it can't be constant folded.
5000 Result = nullptr;
5001 break;
5002 }
5003
5004 // In general, it is possible for computeKnownBits to determine all bits in a
5005 // value even when the operands are not all constants.
5006 if (!Result && I->getType()->isIntOrIntVectorTy()) {
5007 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
5008 if (Known.isConstant())
5009 Result = ConstantInt::get(I->getType(), Known.getConstant());
5010 }
5011
5012 /// If called on unreachable code, the above logic may report that the
5013 /// instruction simplified to itself. Make life easier for users by
5014 /// detecting that case here, returning a safe value instead.
5015 return Result == I ? UndefValue::get(I->getType()) : Result;
5016}
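
A short hypothetical driver (same assumptions and made-up names as the earlier sketches) shows the typical usage pattern: SimplifyInstruction only reports the simpler value, and the caller decides whether to rewrite the IR:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  FunctionType *FTy = FunctionType::get(I32, {I32}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  Value *X = &*F->arg_begin();

  // %sum = add i32 %x, 0 -- not folded by the builder since %x is not constant.
  auto *Add = cast<Instruction>(B.CreateAdd(X, B.getInt32(0), "sum"));

  Value *V = SimplifyInstruction(Add, SimplifyQuery(M.getDataLayout()));
  if (V) { // here V == %x
    Add->replaceAllUsesWith(V);
    Add->eraseFromParent();
  }
  return V == X ? 0 : 1;
}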
5017
5018/// Implementation of recursive simplification through an instruction's
5019/// uses.
5020///
5021/// This is the common implementation of the recursive simplification routines.
5022/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
5023/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
5024/// instructions to process and attempt to simplify it using
5025/// InstructionSimplify.
5026///
5027/// This routine returns 'true' only when *it* simplifies something. The passed
5028/// in simplified value does not count toward this.
5029static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
5030 const TargetLibraryInfo *TLI,
5031 const DominatorTree *DT,
5032 AssumptionCache *AC) {
5033 bool Simplified = false;
5034 SmallSetVector<Instruction *, 8> Worklist;
5035 const DataLayout &DL = I->getModule()->getDataLayout();
5036
5037 // If we have an explicit value to collapse to, do that round of the
5038 // simplification loop by hand initially.
5039 if (SimpleV) {
5040 for (User *U : I->users())
5041 if (U != I)
5042 Worklist.insert(cast<Instruction>(U));
5043
5044 // Replace the instruction with its simplified value.
5045 I->replaceAllUsesWith(SimpleV);
5046
5047 // Gracefully handle edge cases where the instruction is not wired into any
5048 // parent block.
5049 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
5050 !I->mayHaveSideEffects())
5051 I->eraseFromParent();
5052 } else {
5053 Worklist.insert(I);
5054 }
5055
5056 // Note that we must test the size on each iteration, the worklist can grow.
5057 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
5058 I = Worklist[Idx];
5059
5060 // See if this instruction simplifies.
5061 SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
5062 if (!SimpleV)
5063 continue;
5064
5065 Simplified = true;
5066
5067 // Stash away all the uses of the old instruction so we can check them for
5068 // recursive simplifications after a RAUW. This is cheaper than checking all
5069 // uses of To on the recursive step in most cases.
5070 for (User *U : I->users())
5071 Worklist.insert(cast<Instruction>(U));
5072
5073 // Replace the instruction with its simplified value.
5074 I->replaceAllUsesWith(SimpleV);
5075
5076 // Gracefully handle edge cases where the instruction is not wired into any
5077 // parent block.
5078 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
5079 !I->mayHaveSideEffects())
5080 I->eraseFromParent();
5081 }
5082 return Simplified;
5083}
5084
5085bool llvm::recursivelySimplifyInstruction(Instruction *I,
5086 const TargetLibraryInfo *TLI,
5087 const DominatorTree *DT,
5088 AssumptionCache *AC) {
5089 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
5090}
5091
5092bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
5093 const TargetLibraryInfo *TLI,
5094 const DominatorTree *DT,
5095 AssumptionCache *AC) {
5096 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
5097 assert(SimpleV && "Must provide a simplified value.");
5098 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
5099}
5100
5101namespace llvm {
5102const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
5103 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
5104 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
5105 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5106 auto *TLI = TLIWP ? &TLIWP->getTLI() : nullptr;
5107 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
5108 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
5109 return {F.getParent()->getDataLayout(), TLI, DT, AC};
5110}
5111
5112const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
5113 const DataLayout &DL) {
5114 return {DL, &AR.TLI, &AR.DT, &AR.AC};
5115}
5116
5117template <class T, class... TArgs>
5118const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
5119 Function &F) {
5120 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
5121 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
5122 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
5123 return {F.getParent()->getDataLayout(), TLI, DT, AC};
5124}
5125template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
5126 Function &);
5127}

/build/llvm-toolchain-snapshot-7~svn338205/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
60class AllocaInst : public UnaryInstruction {
61 Type *AllocatedType;
62
63protected:
64 // Note: Instruction needs to be a friend here to call cloneImpl.
65 friend class Instruction;
66
67 AllocaInst *cloneImpl() const;
68
69public:
70 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
71 Value *ArraySize = nullptr,
72 const Twine &Name = "",
73 Instruction *InsertBefore = nullptr);
74 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
75 const Twine &Name, BasicBlock *InsertAtEnd);
76
77 AllocaInst(Type *Ty, unsigned AddrSpace,
78 const Twine &Name, Instruction *InsertBefore = nullptr);
79 AllocaInst(Type *Ty, unsigned AddrSpace,
80 const Twine &Name, BasicBlock *InsertAtEnd);
81
82 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
83 const Twine &Name = "", Instruction *InsertBefore = nullptr);
84 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
85 const Twine &Name, BasicBlock *InsertAtEnd);
86
87 /// Return true if there is an allocation size parameter to the allocation
88 /// instruction that is not 1.
89 bool isArrayAllocation() const;
90
91 /// Get the number of elements allocated. For a simple allocation of a single
92 /// element, this will return a constant 1 value.
93 const Value *getArraySize() const { return getOperand(0); }
94 Value *getArraySize() { return getOperand(0); }
95
96 /// Overload to return most specific pointer type.
97 PointerType *getType() const {
98 return cast<PointerType>(Instruction::getType());
99 }
100
101 /// Get allocation size in bits. Returns None if size can't be determined,
102 /// e.g. in case of a VLA.
103 Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const;
104
105 /// Return the type that is being allocated by the instruction.
106 Type *getAllocatedType() const { return AllocatedType; }
107 /// for use only in special circumstances that need to generically
108 /// transform a whole instruction (eg: IR linking and vectorization).
109 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
110
111 /// Return the alignment of the memory that is being allocated by the
112 /// instruction.
113 unsigned getAlignment() const {
114 return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
115 }
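 // Worked example of the encoding above: the out-of-line setAlignment (in
 // Instructions.cpp) stores Log2_32(Align) + 1 in the low five bits, so an
 // alloca aligned to 16 bytes keeps 5 here and decodes as (1u << 5) >> 1 == 16,
 // while a stored 0 decodes to 0, meaning no alignment was recorded.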
116 void setAlignment(unsigned Align);
117
118 /// Return true if this alloca is in the entry block of the function and is a
119 /// constant size. If so, the code generator will fold it into the
120 /// prolog/epilog code, so it is basically free.
121 bool isStaticAlloca() const;
122
123 /// Return true if this alloca is used as an inalloca argument to a call. Such
124 /// allocas are never considered static even if they are in the entry block.
125 bool isUsedWithInAlloca() const {
126 return getSubclassDataFromInstruction() & 32;
127 }
128
129 /// Specify whether this alloca is used to represent the arguments to a call.
130 void setUsedWithInAlloca(bool V) {
131 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
132 (V ? 32 : 0));
133 }
134
135 /// Return true if this alloca is used as a swifterror argument to a call.
136 bool isSwiftError() const {
137 return getSubclassDataFromInstruction() & 64;
138 }
139
140 /// Specify whether this alloca is used to represent a swifterror.
141 void setSwiftError(bool V) {
142 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
143 (V ? 64 : 0));
144 }
145
146 // Methods for support type inquiry through isa, cast, and dyn_cast:
147 static bool classof(const Instruction *I) {
148 return (I->getOpcode() == Instruction::Alloca);
149 }
150 static bool classof(const Value *V) {
151 return isa<Instruction>(V) && classof(cast<Instruction>(V));
152 }
153
154private:
155 // Shadow Instruction::setInstructionSubclassData with a private forwarding
156 // method so that subclasses cannot accidentally use it.
157 void setInstructionSubclassData(unsigned short D) {
158 Instruction::setInstructionSubclassData(D);
159 }
160};
161
162//===----------------------------------------------------------------------===//
163// LoadInst Class
164//===----------------------------------------------------------------------===//
165
166/// An instruction for reading from memory. This uses the SubclassData field in
167/// Value to store whether or not the load is volatile.
168class LoadInst : public UnaryInstruction {
169 void AssertOK();
170
171protected:
172 // Note: Instruction needs to be a friend here to call cloneImpl.
173 friend class Instruction;
174
175 LoadInst *cloneImpl() const;
176
177public:
178 LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
179 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
180 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
181 Instruction *InsertBefore = nullptr);
182 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
183 Instruction *InsertBefore = nullptr)
184 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
185 NameStr, isVolatile, InsertBefore) {}
186 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
187 BasicBlock *InsertAtEnd);
188 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
189 Instruction *InsertBefore = nullptr)
190 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
191 NameStr, isVolatile, Align, InsertBefore) {}
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
193 unsigned Align, Instruction *InsertBefore = nullptr);
194 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
195 unsigned Align, BasicBlock *InsertAtEnd);
196 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
197 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
198 Instruction *InsertBefore = nullptr)
199 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
200 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 unsigned Align, AtomicOrdering Order,
203 SyncScope::ID SSID = SyncScope::System,
204 Instruction *InsertBefore = nullptr);
205 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
206 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
207 BasicBlock *InsertAtEnd);
208 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
209 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
210 LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
211 bool isVolatile = false, Instruction *InsertBefore = nullptr);
212 explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
213 bool isVolatile = false,
214 Instruction *InsertBefore = nullptr)
215 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
216 NameStr, isVolatile, InsertBefore) {}
217 LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
218 BasicBlock *InsertAtEnd);
219
220 /// Return true if this is a load from a volatile memory location.
221 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
222
223 /// Specify whether this is a volatile load or not.
224 void setVolatile(bool V) {
225 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
226 (V ? 1 : 0));
227 }
228
229 /// Return the alignment of the access that is being performed.
230 unsigned getAlignment() const {
231 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
232 }
233
234 void setAlignment(unsigned Align);
235
236 /// Returns the ordering constraint of this load instruction.
237 AtomicOrdering getOrdering() const {
238 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
239 }
240
241 /// Sets the ordering constraint of this load instruction. May not be Release
242 /// or AcquireRelease.
243 void setOrdering(AtomicOrdering Ordering) {
244 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
245 ((unsigned)Ordering << 7));
246 }
247
248 /// Returns the synchronization scope ID of this load instruction.
249 SyncScope::ID getSyncScopeID() const {
250 return SSID;
251 }
252
253 /// Sets the synchronization scope ID of this load instruction.
254 void setSyncScopeID(SyncScope::ID SSID) {
255 this->SSID = SSID;
256 }
257
258 /// Sets the ordering constraint and the synchronization scope ID of this load
259 /// instruction.
260 void setAtomic(AtomicOrdering Ordering,
261 SyncScope::ID SSID = SyncScope::System) {
262 setOrdering(Ordering);
263 setSyncScopeID(SSID);
264 }
265
266 bool isSimple() const { return !isAtomic() && !isVolatile(); }
267
268 bool isUnordered() const {
269 return (getOrdering() == AtomicOrdering::NotAtomic ||
270 getOrdering() == AtomicOrdering::Unordered) &&
271 !isVolatile();
272 }
273
274 Value *getPointerOperand() { return getOperand(0); }
275 const Value *getPointerOperand() const { return getOperand(0); }
276 static unsigned getPointerOperandIndex() { return 0U; }
277 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
278
279 /// Returns the address space of the pointer operand.
280 unsigned getPointerAddressSpace() const {
281 return getPointerOperandType()->getPointerAddressSpace();
282 }
283
284 // Methods for support type inquiry through isa, cast, and dyn_cast:
285 static bool classof(const Instruction *I) {
286 return I->getOpcode() == Instruction::Load;
287 }
288 static bool classof(const Value *V) {
289 return isa<Instruction>(V) && classof(cast<Instruction>(V));
290 }
291
292private:
293 // Shadow Instruction::setInstructionSubclassData with a private forwarding
294 // method so that subclasses cannot accidentally use it.
295 void setInstructionSubclassData(unsigned short D) {
296 Instruction::setInstructionSubclassData(D);
297 }
298
299 /// The synchronization scope ID of this load instruction. Not quite enough
300 /// room in SubClassData for everything, so synchronization scope ID gets its
301 /// own field.
302 SyncScope::ID SSID;
303};
304
305//===----------------------------------------------------------------------===//
306// StoreInst Class
307//===----------------------------------------------------------------------===//
308
309/// An instruction for storing to memory.
310class StoreInst : public Instruction {
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
323 Instruction *InsertBefore = nullptr);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
326 unsigned Align, Instruction *InsertBefore = nullptr);
327 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
328 unsigned Align, BasicBlock *InsertAtEnd);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
330 unsigned Align, AtomicOrdering Order,
331 SyncScope::ID SSID = SyncScope::System,
332 Instruction *InsertBefore = nullptr);
333 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
334 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
335 BasicBlock *InsertAtEnd);
336
337 // allocate space for exactly two operands
338 void *operator new(size_t s) {
339 return User::operator new(s, 2);
340 }
341
342 /// Return true if this is a store to a volatile memory location.
343 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
344
345 /// Specify whether this is a volatile store or not.
346 void setVolatile(bool V) {
347 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
348 (V ? 1 : 0));
349 }
350
351 /// Transparently provide more efficient getOperand methods.
352 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
353
354 /// Return the alignment of the access that is being performed
355 unsigned getAlignment() const {
356 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
357 }
358
359 void setAlignment(unsigned Align);
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
370 ((unsigned)Ordering << 7));
371 }
372
373 /// Returns the synchronization scope ID of this store instruction.
374 SyncScope::ID getSyncScopeID() const {
375 return SSID;
376 }
377
378 /// Sets the synchronization scope ID of this store instruction.
379 void setSyncScopeID(SyncScope::ID SSID) {
380 this->SSID = SSID;
381 }
382
383 /// Sets the ordering constraint and the synchronization scope ID of this
384 /// store instruction.
385 void setAtomic(AtomicOrdering Ordering,
386 SyncScope::ID SSID = SyncScope::System) {
387 setOrdering(Ordering);
388 setSyncScopeID(SSID);
389 }
390
391 bool isSimple() const { return !isAtomic() && !isVolatile(); }
392
393 bool isUnordered() const {
394 return (getOrdering() == AtomicOrdering::NotAtomic ||
395 getOrdering() == AtomicOrdering::Unordered) &&
396 !isVolatile();
397 }
398
399 Value *getValueOperand() { return getOperand(0); }
400 const Value *getValueOperand() const { return getOperand(0); }
401
402 Value *getPointerOperand() { return getOperand(1); }
403 const Value *getPointerOperand() const { return getOperand(1); }
404 static unsigned getPointerOperandIndex() { return 1U; }
405 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
406
407 /// Returns the address space of the pointer operand.
408 unsigned getPointerAddressSpace() const {
409 return getPointerOperandType()->getPointerAddressSpace();
410 }
411
412 // Methods for support type inquiry through isa, cast, and dyn_cast:
413 static bool classof(const Instruction *I) {
414 return I->getOpcode() == Instruction::Store;
415 }
416 static bool classof(const Value *V) {
417 return isa<Instruction>(V) && classof(cast<Instruction>(V));
418 }
419
420private:
421 // Shadow Instruction::setInstructionSubclassData with a private forwarding
422 // method so that subclasses cannot accidentally use it.
423 void setInstructionSubclassData(unsigned short D) {
424 Instruction::setInstructionSubclassData(D);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
446
447protected:
448 // Note: Instruction needs to be a friend here to call cloneImpl.
449 friend class Instruction;
450
451 FenceInst *cloneImpl() const;
452
453public:
454 // Ordering may only be Acquire, Release, AcquireRelease, or
455 // SequentiallyConsistent.
456 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
457 SyncScope::ID SSID = SyncScope::System,
458 Instruction *InsertBefore = nullptr);
459 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
460 BasicBlock *InsertAtEnd);
461
462 // allocate space for exactly zero operands
463 void *operator new(size_t s) {
464 return User::operator new(s, 0);
465 }
466
467 /// Returns the ordering constraint of this fence instruction.
468 AtomicOrdering getOrdering() const {
469 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
470 }
471
472 /// Sets the ordering constraint of this fence instruction. May only be
473 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
474 void setOrdering(AtomicOrdering Ordering) {
475 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
476 ((unsigned)Ordering << 1));
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 void setInstructionSubclassData(unsigned short D) {
501 Instruction::setInstructionSubclassData(D);
502 }
503
504 /// The synchronization scope ID of this fence instruction. Not quite enough
505 /// room in SubClassData for everything, so synchronization scope ID gets its
506 /// own field.
507 SyncScope::ID SSID;
508};
509
510//===----------------------------------------------------------------------===//
511// AtomicCmpXchgInst Class
512//===----------------------------------------------------------------------===//
513
514/// an instruction that atomically checks whether a
515/// specified value is in a memory location, and, if it is, stores a new value
516/// there. Returns the value that was loaded.
517///
518class AtomicCmpXchgInst : public Instruction {
519 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
520 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
521 SyncScope::ID SSID);
522
523protected:
524 // Note: Instruction needs to be a friend here to call cloneImpl.
525 friend class Instruction;
526
527 AtomicCmpXchgInst *cloneImpl() const;
528
529public:
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
534 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
535 AtomicOrdering SuccessOrdering,
536 AtomicOrdering FailureOrdering,
537 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
538
539 // allocate space for exactly three operands
540 void *operator new(size_t s) {
541 return User::operator new(s, 3);
542 }
543
544 /// Return true if this is a cmpxchg from a volatile memory
545 /// location.
546 ///
547 bool isVolatile() const {
548 return getSubclassDataFromInstruction() & 1;
549 }
550
551 /// Specify whether this is a volatile cmpxchg.
552 ///
553 void setVolatile(bool V) {
554 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
555 (unsigned)V);
556 }
557
558 /// Return true if this cmpxchg may spuriously fail.
559 bool isWeak() const {
560 return getSubclassDataFromInstruction() & 0x100;
561 }
562
563 void setWeak(bool IsWeak) {
564 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
565 (IsWeak << 8));
566 }
567
568 /// Transparently provide more efficient getOperand methods.
569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
570
571 /// Returns the success ordering constraint of this cmpxchg instruction.
572 AtomicOrdering getSuccessOrdering() const {
573 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
574 }
575
576 /// Sets the success ordering constraint of this cmpxchg instruction.
577 void setSuccessOrdering(AtomicOrdering Ordering) {
578 assert(Ordering != AtomicOrdering::NotAtomic &&
579 "CmpXchg instructions can only be atomic.");
580 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
581 ((unsigned)Ordering << 2));
582 }
583
584 /// Returns the failure ordering constraint of this cmpxchg instruction.
585 AtomicOrdering getFailureOrdering() const {
586 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
587 }
588
589 /// Sets the failure ordering constraint of this cmpxchg instruction.
590 void setFailureOrdering(AtomicOrdering Ordering) {
591 assert(Ordering != AtomicOrdering::NotAtomic &&
592 "CmpXchg instructions can only be atomic.");
593 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
594 ((unsigned)Ordering << 5));
595 }
596
597 /// Returns the synchronization scope ID of this cmpxchg instruction.
598 SyncScope::ID getSyncScopeID() const {
599 return SSID;
600 }
601
602 /// Sets the synchronization scope ID of this cmpxchg instruction.
603 void setSyncScopeID(SyncScope::ID SSID) {
604 this->SSID = SSID;
605 }
606
607 Value *getPointerOperand() { return getOperand(0); }
608 const Value *getPointerOperand() const { return getOperand(0); }
609 static unsigned getPointerOperandIndex() { return 0U; }
610
611 Value *getCompareOperand() { return getOperand(1); }
612 const Value *getCompareOperand() const { return getOperand(1); }
613
614 Value *getNewValOperand() { return getOperand(2); }
615 const Value *getNewValOperand() const { return getOperand(2); }
616
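A short sketch of reading the three fixed operands, again assuming a hypothetical AtomicCmpXchgInst *CXI from elsewhere:

    Value *Addr = CXI->getPointerOperand();  // operand 0: address being updated
    Value *Cmp  = CXI->getCompareOperand();  // operand 1: value expected at Addr
    Value *New  = CXI->getNewValOperand();   // operand 2: value stored on success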
617 /// Returns the address space of the pointer operand.
618 unsigned getPointerAddressSpace() const {
619 return getPointerOperand()->getType()->getPointerAddressSpace();
620 }
621
622 /// Returns the strongest permitted ordering on failure, given the
623 /// desired ordering on success.
624 ///
625 /// If the comparison in a cmpxchg operation fails, there is no atomic store,
626 /// so release semantics cannot be provided. This function therefore drops explicit
627 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
628 /// operation would remain SequentiallyConsistent.
629 static AtomicOrdering
630 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
631 switch (SuccessOrdering) {
632 default:
633 llvm_unreachable("invalid cmpxchg success ordering");
634 case AtomicOrdering::Release:
635 case AtomicOrdering::Monotonic:
636 return AtomicOrdering::Monotonic;
637 case AtomicOrdering::AcquireRelease:
638 case AtomicOrdering::Acquire:
639 return AtomicOrdering::Acquire;
640 case AtomicOrdering::SequentiallyConsistent:
641 return AtomicOrdering::SequentiallyConsistent;
642 }
643 }
644
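A minimal sketch of the mapping above; since a failed cmpxchg performs no store, any release component is dropped while acquire and sequentially consistent orderings survive:

    // AcquireRelease on success leaves Acquire as the strongest failure ordering.
    AtomicOrdering Fail = AtomicCmpXchgInst::getStrongestFailureOrdering(
        AtomicOrdering::AcquireRelease);
    assert(Fail == AtomicOrdering::Acquire);
    // Release maps to Monotonic; SequentiallyConsistent stays SequentiallyConsistent.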
645 // Methods for support type inquiry through isa, cast, and dyn_cast:
646 static bool classof(const Instruction *I) {
647 return I->getOpcode() == Instruction::AtomicCmpXchg;
648 }
649 static bool classof(const Value *V) {
650 return isa<Instruction>(V) && classof(cast<Instruction>(V));
651 }
652
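These classof overloads are what the standard LLVM casting utilities consult; a hedged sketch, where I is a hypothetical Instruction * being inspected:

    if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
      // Only reached when I is an atomic cmpxchg; classof() above made the decision.
      AtomicOrdering Succ = CXI->getSuccessOrdering();
      (void)Succ;
    }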
653private:
654 // Shadow Instruction::setInstructionSubclassData with a private forwarding
655 // method so that subclasses cannot accidentally use it.
656 void setInstructionSubclassData(unsigned short D) {
657 Instruction::setInstructionSubclassData(D);
658 }
659
660 /// The synchronization scope ID of this cmpxchg instruction. Not quite
661 /// enough room in SubClassData for everything, so synchronization scope ID
662 /// gets its own field.
663 SyncScope::ID SSID;
664};
665
666template <>
667struct OperandTraits<AtomicCmpXchgInst> :
668 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
669};
670
671DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
672
673//===----------------------------------------------------------------------===//
674// AtomicRMWInst Class
675//===----------------------------------------------------------------------===//
676
677/// An instruction that atomically reads a memory location,
678/// combines it with another value, and then stores the result back. Returns
679/// the old value.
680///
681class AtomicRMWInst : public Instruction {
682protected:
683 // Note: Instruction needs to be a friend here to call cloneImpl.
684 friend class Instruction;
685
686 AtomicRMWInst *cloneImpl() const;
687
688public:
689 /// This enumeration lists the possible modifications atomicrmw can make. In
690 /// the descriptions, 'p' is the pointer to the instruction's memory location,
691 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
692 /// instruction. These instructions always return 'old'.
693 enum BinOp {
694 /// *p = v
695 Xchg,
696 /// *p = old + v
697 Add,
698 /// *p = old - v
699 Sub,
700 /// *p = old & v
701 And,
702 /// *p = ~(old & v)
703 Nand,
704 /// *p = old | v
705 Or,
706 /// *p = old ^ v
707 Xor,
708 /// *p = old >signed v ? old : v
709 Max,
710 /// *p = old <signed v ? old : v
711 Min,
712 /// *p = old >unsigned v ? old : v
713 UMax,
714 /// *p = old <unsigned v ? old : v
715 UMin,
716
717 FIRST_BINOP = Xchg,
718 LAST_BINOP = UMin,
719 BAD_BINOP
720 };
721
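To make the BinOp descriptions concrete, a non-atomic sketch of what an atomicrmw nand computes (illustration only; the real instruction performs this read-modify-write as a single atomic step):

    int atomicrmw_nand_like(int *p, int v) {
      int old = *p;     // 'old': prior contents of the location
      *p = ~(old & v);  // Nand stores ~(old & v)
      return old;       // atomicrmw always yields the old value
    }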
722 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
723 AtomicOrdering Ordering, SyncScope::ID SSID,
724 Instruction *InsertBefore = nullptr);
725 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
726 AtomicOrdering Ordering, SyncScope::ID SSID,
727 BasicBlock *InsertAtEnd);
728
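A hedged construction sketch using the first constructor above; Ptr, Val, and InsertPt are hypothetical values from elsewhere (Val's type matching Ptr's pointee type), and SyncScope::System is assumed to name the whole-system synchronization scope:

    auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val,
                                  AtomicOrdering::Monotonic,
                                  SyncScope::System, InsertPt);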
729 // allocate space for exactly two operands
730 void *operator new(size_t s) {
731 return User::operator new(s, 2);
732 }
733
734 BinOp getOperation() const {
735 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
736 }
737
738 void setOperation(BinOp Operation) {
739 unsigned short SubclassData = getSubclassDataFromInstruction();
740 setInstructionSubclassData((SubclassData & 31) |
741 (Operation << 5));
742 }
743
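A small dispatch sketch on the packed operation field, with RMWI a hypothetical AtomicRMWInst * from elsewhere:

    switch (RMWI->getOperation()) {
    case AtomicRMWInst::Add:  /* handle atomic add  */ break;
    case AtomicRMWInst::Xchg: /* handle plain swap  */ break;
    default:                  /* other BinOp values */ break;
    }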
744 /// Return true if this is a RMW on a volatile memory location.
745 ///
746 bool isVolatile() const {
747 return getSubclassDataFromInstruction() & 1;
748 }
749
750 /// Specify whether this is a volatile RMW or not.
751 ///
752 void setVolatile(bool V) {
753 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
754 (unsigned)V);
755 }
756
757 /// Transparently provide more efficient getOperand methods.
758 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
759
760 /// Returns the ordering constraint of this rmw instruction.
761 AtomicOrdering getOrdering() const {
762 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
763 }
764
765 /// Sets the ordering constraint of this rmw instruction.
766 void setOrdering(AtomicOrdering Ordering) {
767 assert(Ordering != AtomicOrdering::NotAtomic &&
768 "atomicrmw instructions can only be atomic.");
769 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
770 ((unsigned)Ordering << 2));
771 }
772
773 /// Returns the synchronization scope ID of this rmw instruction.
774 SyncScope::ID getSyncScopeID() const {
775 return SSID;
776 }
777
778 /// Sets the synchronization scope ID of this rmw instruction.
779 void setSyncScopeID(SyncScope::ID SSID) {
780 this->SSID = SSID;
781 }
782
783 Value *getPointerOperand() { return getOperand(0); }
784 const Value *getPointerOperand() const { return getOperand(0); }
785 static unsigned getPointerOperandIndex() { return 0U; }
786
787 Value *getValOperand() { return getOperand(1); }
788 const Value *getValOperand() const { return getOperand(1); }
789
790 /// Returns the address space of the pointer operand.
791 unsigned getPointerAddressSpace() const {
792 return getPointerOperand()->getType()->getPointerAddressSpace();
793 }
794
795 // Methods for support type inquiry through isa, cast, and dyn_cast:
796 static bool classof(const Instruction *I) {
797 return I->getOpcode() == Instruction::AtomicRMW;
798 }
799 static bool classof(const Value *V) {
800 return isa<Instruction>(V) && classof(cast<Instruction>(V));
801 }
802
803private:
804 void Init(BinOp Operation, Value *Ptr, Value *Val,
805 AtomicOrdering Ordering, SyncScope::ID SSID);
806
807 // Shadow Instruction::setInstructionSubclassData with a private forwarding
808 // method so that subclasses cannot accidentally use it.
809 void setInstructionSubclassData(unsigned short D) {
810 Instruction::setInstructionSubclassData(D);
811 }
812
813 /// The synchronization scope ID of this rmw instruction. Not quite enough
814 /// room in SubClassData for everything, so synchronization scope ID gets its
815 /// own field.
816 SyncScope::ID SSID;
817};
818
819template <>
820struct OperandTraits<AtomicRMWInst>
821 : public FixedNumOperandTraits<AtomicRMWInst,2> {
822};
823
824DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
825
826//===----------------------------------------------------------------------===//
827// GetElementPtrInst Class
828//===----------------------------------------------------------------------===//
829
830// checkGEPType - Simple wrapper function to give a better assertion failure
831// message on bad indexes for a gep instruction.
832//
833inline Type *checkGEPType(Type *Ty) {
834 assert(Ty && "Invalid GetElementPtrInst indices for type!");
835 return Ty;
836}
837
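A hedged usage sketch: callers route a possibly-null element type through this wrapper so bad indices fail with the assertion message above rather than a later null dereference (ElTy is a hypothetical Type * computed from GEP indices elsewhere):

    Type *Checked = checkGEPType(ElTy);  // asserts with the message above if ElTy is null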
838/// An instruction for type-safe pointer arithmetic to
839/// access elements of arrays and structs.
840///
841class GetElementPtrInst : public Instruction {
842 Type *SourceElementType;