Bug Summary

File: lib/Transforms/Scalar/GuardWidening.cpp
Warning: line 463, column 9
Called C++ object pointer is null
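
The path below ends in GuardWideningImpl::widenCondCommon (line 463): when parseRangeChecks and combineRangeChecks succeed but CombinedChecks ends up empty, the loop that is supposed to populate Result never runs, Result keeps the nullptr stored at line 453, and Result->setName("wide.chk") dereferences it. The following standalone sketch (hypothetical Check type, not the LLVM classes) reproduces the shape of that path under the analyzer's assumption that the combined-check list can be empty:

#include <string>
#include <vector>

struct Check {
  std::string Name;
  void setName(const std::string &N) { Name = N; }  // stands in for Value::setName
};

// Mirrors the control flow of widenCondCommon's range-check branch.
void widenSketch(std::vector<Check *> &CombinedChecks, bool InsertPt) {
  Check *Result = nullptr;                // line 453: null stored to Result
  if (InsertPt) {
    for (Check *RC : CombinedChecks)      // analyzer: '__begin' == '__end', body skipped
      Result = RC;                        // only assignment to Result on this path
    Result->setName("wide.chk");          // line 463: null dereference when list is empty
  }
}

Whether an empty CombinedChecks can actually reach this point depends on the contract of combineRangeChecks; guarding the setName call on Result being non-null (or asserting that CombinedChecks is non-empty) would close the reported path either way.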

Annotated Source Code

/build/llvm-toolchain-snapshot-6.0~svn318211/lib/Transforms/Scalar/GuardWidening.cpp

1//===- GuardWidening.cpp - ---- Guard widening ----------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the guard widening pass. The semantics of the
11// @llvm.experimental.guard intrinsic lets LLVM transform it so that it fails
12// more often than it did before the transform. This optimization is called
13// "widening" and can be used to hoist and common runtime checks in situations like
14// these:
15//
16// %cmp0 = 7 u< Length
17// call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
18// call @unknown_side_effects()
19// %cmp1 = 9 u< Length
20// call @llvm.experimental.guard(i1 %cmp1) [ "deopt"(...) ]
21// ...
22//
23// =>
24//
25// %cmp0 = 9 u< Length
26// call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
27// call @unknown_side_effects()
28// ...
29//
30// If %cmp0 is false, @llvm.experimental.guard will "deoptimize" back to a
31// generic implementation of the same function, which will have the correct
32// semantics from that point onward. It is always _legal_ to deoptimize (so
33// replacing %cmp0 with false is "correct"), though it may not always be
34// profitable to do so.
35//
36// NB! This pass is a work in progress. It hasn't been tuned to be "production
37// ready" yet. It is known to have quadratic running time and will not scale
38// to large numbers of guards
39//
40//===----------------------------------------------------------------------===//
41
42#include "llvm/Transforms/Scalar/GuardWidening.h"
43#include "llvm/ADT/DenseMap.h"
44#include "llvm/ADT/DepthFirstIterator.h"
45#include "llvm/Analysis/LoopInfo.h"
46#include "llvm/Analysis/PostDominators.h"
47#include "llvm/Analysis/ValueTracking.h"
48#include "llvm/IR/ConstantRange.h"
49#include "llvm/IR/Dominators.h"
50#include "llvm/IR/IntrinsicInst.h"
51#include "llvm/IR/PatternMatch.h"
52#include "llvm/Pass.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/KnownBits.h"
55#include "llvm/Transforms/Scalar.h"
56
57using namespace llvm;
58
59#define DEBUG_TYPE "guard-widening"
60
61namespace {
62
63class GuardWideningImpl {
64 DominatorTree &DT;
65 PostDominatorTree &PDT;
66 LoopInfo &LI;
67
68 /// The set of guards whose conditions have been widened into dominating
69 /// guards.
70 SmallVector<IntrinsicInst *, 16> EliminatedGuards;
71
72 /// The set of guards which have been widened to include conditions to other
73 /// guards.
74 DenseSet<IntrinsicInst *> WidenedGuards;
75
76 /// Try to eliminate guard \p Guard by widening it into an earlier dominating
77 /// guard. \p DFSI is the DFS iterator on the dominator tree that is
78 /// currently visiting the block containing \p Guard, and \p GuardsPerBlock
79 /// maps BasicBlocks to the set of guards seen in that block.
80 bool eliminateGuardViaWidening(
81 IntrinsicInst *Guard, const df_iterator<DomTreeNode *> &DFSI,
82 const DenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 8>> &
83 GuardsPerBlock);
84
85 /// Used to keep track of which widening potential is more effective.
86 enum WideningScore {
87 /// Don't widen.
88 WS_IllegalOrNegative,
89
90 /// Widening is performance neutral as far as the cycles spent in check
91 /// conditions goes (but can still help, e.g., code layout, having less
92 /// deopt state).
93 WS_Neutral,
94
95 /// Widening is profitable.
96 WS_Positive,
97
98 /// Widening is very profitable. Not significantly different from \c
99 /// WS_Positive, except by the order.
100 WS_VeryPositive
101 };
102
103 static StringRef scoreTypeToString(WideningScore WS);
104
105 /// Compute the score for widening the condition in \p DominatedGuard
106 /// (contained in \p DominatedGuardLoop) into \p DominatingGuard (contained in
107 /// \p DominatingGuardLoop).
108 WideningScore computeWideningScore(IntrinsicInst *DominatedGuard,
109 Loop *DominatedGuardLoop,
110 IntrinsicInst *DominatingGuard,
111 Loop *DominatingGuardLoop);
112
113 /// Helper to check if \p V can be hoisted to \p InsertPos.
114 bool isAvailableAt(Value *V, Instruction *InsertPos) {
115 SmallPtrSet<Instruction *, 8> Visited;
116 return isAvailableAt(V, InsertPos, Visited);
117 }
118
119 bool isAvailableAt(Value *V, Instruction *InsertPos,
120 SmallPtrSetImpl<Instruction *> &Visited);
121
122 /// Helper to hoist \p V to \p InsertPos. Guaranteed to succeed if \c
123 /// isAvailableAt returned true.
124 void makeAvailableAt(Value *V, Instruction *InsertPos);
125
126 /// Common helper used by \c widenGuard and \c isWideningCondProfitable. Try
127 /// to generate an expression computing the logical AND of \p Cond0 and \p
128 /// Cond1. Return true if the expression computing the AND is only as
129 /// expensive as computing one of the two. If \p InsertPt is true then
130 /// actually generate the resulting expression, make it available at \p
131 /// InsertPt and return it in \p Result (else no change to the IR is made).
132 bool widenCondCommon(Value *Cond0, Value *Cond1, Instruction *InsertPt,
133 Value *&Result);
134
135 /// Represents a range check of the form \c Base + \c Offset u< \c Length,
136 /// with the constraint that \c Length is not negative. \c CheckInst is the
137 /// pre-existing instruction in the IR that computes the result of this range
138 /// check.
139 class RangeCheck {
140 Value *Base;
141 ConstantInt *Offset;
142 Value *Length;
143 ICmpInst *CheckInst;
144
145 public:
146 explicit RangeCheck(Value *Base, ConstantInt *Offset, Value *Length,
147 ICmpInst *CheckInst)
148 : Base(Base), Offset(Offset), Length(Length), CheckInst(CheckInst) {}
149
150 void setBase(Value *NewBase) { Base = NewBase; }
151 void setOffset(ConstantInt *NewOffset) { Offset = NewOffset; }
152
153 Value *getBase() const { return Base; }
154 ConstantInt *getOffset() const { return Offset; }
155 const APInt &getOffsetValue() const { return getOffset()->getValue(); }
156 Value *getLength() const { return Length; };
157 ICmpInst *getCheckInst() const { return CheckInst; }
158
159 void print(raw_ostream &OS, bool PrintTypes = false) {
160 OS << "Base: ";
161 Base->printAsOperand(OS, PrintTypes);
162 OS << " Offset: ";
163 Offset->printAsOperand(OS, PrintTypes);
164 OS << " Length: ";
165 Length->printAsOperand(OS, PrintTypes);
166 }
167
168 LLVM_DUMP_METHOD void dump() {
169 print(dbgs());
170 dbgs() << "\n";
171 }
172 };
173
174 /// Parse \p CheckCond into a conjunction (logical-and) of range checks; and
175 /// append them to \p Checks. Returns true on success, may clobber \c Checks
176 /// on failure.
177 bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks) {
178 SmallPtrSet<Value *, 8> Visited;
179 return parseRangeChecks(CheckCond, Checks, Visited);
53
Calling 'GuardWideningImpl::parseRangeChecks'
427
Returning from 'GuardWideningImpl::parseRangeChecks'
430
Calling 'GuardWideningImpl::parseRangeChecks'
804
Returning from 'GuardWideningImpl::parseRangeChecks'
180 }
181
182 bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks,
183 SmallPtrSetImpl<Value *> &Visited);
184
185 /// Combine the checks in \p Checks into a smaller set of checks and append
186 /// them into \p CombinedChecks. Return true on success (i.e. all of the checks
187 /// in \p Checks were combined into \p CombinedChecks). Clobbers \p Checks
188 /// and \p CombinedChecks on success and on failure.
189 bool combineRangeChecks(SmallVectorImpl<RangeCheck> &Checks,
190 SmallVectorImpl<RangeCheck> &CombinedChecks);
191
192 /// Can we compute the logical AND of \p Cond0 and \p Cond1 for the price of
193 /// computing only one of the two expressions?
194 bool isWideningCondProfitable(Value *Cond0, Value *Cond1) {
195 Value *ResultUnused;
196 return widenCondCommon(Cond0, Cond1, /*InsertPt=*/nullptr, ResultUnused);
197 }
198
199 /// Widen \p ToWiden to fail if \p NewCondition is false (in addition to
200 /// whatever it is already checking).
201 void widenGuard(IntrinsicInst *ToWiden, Value *NewCondition) {
202 Value *Result;
203 widenCondCommon(ToWiden->getArgOperand(0), NewCondition, ToWiden, Result);
1
Passing value via 4th parameter 'Result'
2
Calling 'GuardWideningImpl::widenCondCommon'
204 ToWiden->setArgOperand(0, Result);
205 }
206
207public:
208 explicit GuardWideningImpl(DominatorTree &DT, PostDominatorTree &PDT,
209 LoopInfo &LI)
210 : DT(DT), PDT(PDT), LI(LI) {}
211
212 /// The entry point for this pass.
213 bool run();
214};
215
216struct GuardWideningLegacyPass : public FunctionPass {
217 static char ID;
218 GuardWideningPass Impl;
219
220 GuardWideningLegacyPass() : FunctionPass(ID) {
221 initializeGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
222 }
223
224 bool runOnFunction(Function &F) override {
225 if (skipFunction(F))
226 return false;
227 return GuardWideningImpl(
228 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
229 getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(),
230 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()).run();
231 }
232
233 void getAnalysisUsage(AnalysisUsage &AU) const override {
234 AU.setPreservesCFG();
235 AU.addRequired<DominatorTreeWrapperPass>();
236 AU.addRequired<PostDominatorTreeWrapperPass>();
237 AU.addRequired<LoopInfoWrapperPass>();
238 }
239};
240
241}
242
243bool GuardWideningImpl::run() {
244 using namespace llvm::PatternMatch;
245
246 DenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 8>> GuardsInBlock;
247 bool Changed = false;
248
249 for (auto DFI = df_begin(DT.getRootNode()), DFE = df_end(DT.getRootNode());
250 DFI != DFE; ++DFI) {
251 auto *BB = (*DFI)->getBlock();
252 auto &CurrentList = GuardsInBlock[BB];
253
254 for (auto &I : *BB)
255 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>()))
256 CurrentList.push_back(cast<IntrinsicInst>(&I));
257
258 for (auto *II : CurrentList)
259 Changed |= eliminateGuardViaWidening(II, DFI, GuardsInBlock);
260 }
261
262 for (auto *II : EliminatedGuards)
263 if (!WidenedGuards.count(II))
264 II->eraseFromParent();
265
266 return Changed;
267}
268
269bool GuardWideningImpl::eliminateGuardViaWidening(
270 IntrinsicInst *GuardInst, const df_iterator<DomTreeNode *> &DFSI,
271 const DenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 8>> &
272 GuardsInBlock) {
273 IntrinsicInst *BestSoFar = nullptr;
274 auto BestScoreSoFar = WS_IllegalOrNegative;
275 auto *GuardInstLoop = LI.getLoopFor(GuardInst->getParent());
276
277 // In the set of dominating guards, find the one we can merge GuardInst with
278 // for the most profit.
279 for (unsigned i = 0, e = DFSI.getPathLength(); i != e; ++i) {
280 auto *CurBB = DFSI.getPath(i)->getBlock();
281 auto *CurLoop = LI.getLoopFor(CurBB);
282 assert(GuardsInBlock.count(CurBB) && "Must have been populated by now!");
283 const auto &GuardsInCurBB = GuardsInBlock.find(CurBB)->second;
284
285 auto I = GuardsInCurBB.begin();
286 auto E = GuardsInCurBB.end();
287
288#ifndef NDEBUG
289 {
290 unsigned Index = 0;
291 for (auto &I : *CurBB) {
292 if (Index == GuardsInCurBB.size())
293 break;
294 if (GuardsInCurBB[Index] == &I)
295 Index++;
296 }
297 assert(Index == GuardsInCurBB.size() &&
298 "Guards expected to be in order!");
299 }
300#endif
301
302 assert((i == (e - 1)) == (GuardInst->getParent() == CurBB) && "Bad DFS?");
303
304 if (i == (e - 1)) {
305 // Corner case: make sure we're only looking at guards strictly dominating
306 // GuardInst when visiting GuardInst->getParent().
307 auto NewEnd = std::find(I, E, GuardInst);
308 assert(NewEnd != E && "GuardInst not in its own block?");
309 E = NewEnd;
310 }
311
312 for (auto *Candidate : make_range(I, E)) {
313 auto Score =
314 computeWideningScore(GuardInst, GuardInstLoop, Candidate, CurLoop);
315 DEBUG(dbgs() << "Score between " << *GuardInst->getArgOperand(0)
316 << " and " << *Candidate->getArgOperand(0) << " is "
317 << scoreTypeToString(Score) << "\n");
318 if (Score > BestScoreSoFar) {
319 BestScoreSoFar = Score;
320 BestSoFar = Candidate;
321 }
322 }
323 }
324
325 if (BestScoreSoFar == WS_IllegalOrNegative) {
326 DEBUG(dbgs() << "Did not eliminate guard " << *GuardInst << "\n");
327 return false;
328 }
329
330 assert(BestSoFar != GuardInst && "Should have never visited same guard!");
331 assert(DT.dominates(BestSoFar, GuardInst) && "Should be!");
332
333 DEBUG(dbgs() << "Widening " << *GuardInst << " into " << *BestSoFar
334 << " with score " << scoreTypeToString(BestScoreSoFar) << "\n");
335 widenGuard(BestSoFar, GuardInst->getArgOperand(0));
336 GuardInst->setArgOperand(0, ConstantInt::getTrue(GuardInst->getContext()));
337 EliminatedGuards.push_back(GuardInst);
338 WidenedGuards.insert(BestSoFar);
339 return true;
340}
341
342GuardWideningImpl::WideningScore GuardWideningImpl::computeWideningScore(
343 IntrinsicInst *DominatedGuard, Loop *DominatedGuardLoop,
344 IntrinsicInst *DominatingGuard, Loop *DominatingGuardLoop) {
345 bool HoistingOutOfLoop = false;
346
347 if (DominatingGuardLoop != DominatedGuardLoop) {
348 if (DominatingGuardLoop &&
349 !DominatingGuardLoop->contains(DominatedGuardLoop))
350 return WS_IllegalOrNegative;
351
352 HoistingOutOfLoop = true;
353 }
354
355 if (!isAvailableAt(DominatedGuard->getArgOperand(0), DominatingGuard))
356 return WS_IllegalOrNegative;
357
358 bool HoistingOutOfIf =
359 !PDT.dominates(DominatedGuard->getParent(), DominatingGuard->getParent());
360
361 if (isWideningCondProfitable(DominatedGuard->getArgOperand(0),
362 DominatingGuard->getArgOperand(0)))
363 return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;
364
365 if (HoistingOutOfLoop)
366 return WS_Positive;
367
368 return HoistingOutOfIf ? WS_IllegalOrNegative : WS_Neutral;
369}
370
371bool GuardWideningImpl::isAvailableAt(Value *V, Instruction *Loc,
372 SmallPtrSetImpl<Instruction *> &Visited) {
373 auto *Inst = dyn_cast<Instruction>(V);
374 if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
375 return true;
376
377 if (!isSafeToSpeculativelyExecute(Inst, Loc, &DT) ||
378 Inst->mayReadFromMemory())
379 return false;
380
381 Visited.insert(Inst);
382
383 // We only want to go _up_ the dominance chain when recursing.
384 assert(!isa<PHINode>(Loc) &&
385 "PHIs should return false for isSafeToSpeculativelyExecute");
386 assert(DT.isReachableFromEntry(Inst->getParent()) &&
387 "We did a DFS from the block entry!");
388 return all_of(Inst->operands(),
389 [&](Value *Op) { return isAvailableAt(Op, Loc, Visited); });
390}
391
392void GuardWideningImpl::makeAvailableAt(Value *V, Instruction *Loc) {
393 auto *Inst = dyn_cast<Instruction>(V);
394 if (!Inst || DT.dominates(Inst, Loc))
395 return;
396
397 assert(isSafeToSpeculativelyExecute(Inst, Loc, &DT) &&
398 !Inst->mayReadFromMemory() && "Should've checked with isAvailableAt!");
399
400 for (Value *Op : Inst->operands())
401 makeAvailableAt(Op, Loc);
402
403 Inst->moveBefore(Loc);
404}
405
406bool GuardWideningImpl::widenCondCommon(Value *Cond0, Value *Cond1,
407 Instruction *InsertPt, Value *&Result) {
408 using namespace llvm::PatternMatch;
409
410 {
411 // L >u C0 && L >u C1 -> L >u max(C0, C1)
412 ConstantInt *RHS0, *RHS1;
413 Value *LHS;
414 ICmpInst::Predicate Pred0, Pred1;
415 if (match(Cond0, m_ICmp(Pred0, m_Value(LHS), m_ConstantInt(RHS0))) &&
3
Calling 'm_Value'
6
Returning from 'm_Value'
7
Calling 'm_ConstantInt'
10
Returning from 'm_ConstantInt'
11
Calling 'm_ICmp'
14
Returning from 'm_ICmp'
15
Calling 'match'
50
Returning from 'match'
51
Taking false branch
416 match(Cond1, m_ICmp(Pred1, m_Specific(LHS), m_ConstantInt(RHS1)))) {
417
418 ConstantRange CR0 =
419 ConstantRange::makeExactICmpRegion(Pred0, RHS0->getValue());
420 ConstantRange CR1 =
421 ConstantRange::makeExactICmpRegion(Pred1, RHS1->getValue());
422
423 // SubsetIntersect is a subset of the actual mathematical intersection of
424 // CR0 and CR1, while SupersetIntersect is a superset of the actual
425 // mathematical intersection. If these two ConstantRanges are equal, then
426 // we know we were able to represent the actual mathematical intersection
427 // of CR0 and CR1, and can use the same to generate an icmp instruction.
428 //
429 // Given what we're doing here and the semantics of guards, it would
430 // actually be correct to just use SubsetIntersect, but that may be too
431 // aggressive in cases we care about.
432 auto SubsetIntersect = CR0.inverse().unionWith(CR1.inverse()).inverse();
433 auto SupersetIntersect = CR0.intersectWith(CR1);
434
435 APInt NewRHSAP;
436 CmpInst::Predicate Pred;
437 if (SubsetIntersect == SupersetIntersect &&
438 SubsetIntersect.getEquivalentICmp(Pred, NewRHSAP)) {
439 if (InsertPt) {
440 ConstantInt *NewRHS = ConstantInt::get(Cond0->getContext(), NewRHSAP);
441 Result = new ICmpInst(InsertPt, Pred, LHS, NewRHS, "wide.chk");
442 }
443 return true;
444 }
445 }
446 }
447
448 {
449 SmallVector<GuardWideningImpl::RangeCheck, 4> Checks, CombinedChecks;
450 if (parseRangeChecks(Cond0, Checks) && parseRangeChecks(Cond1, Checks) &&
52
Calling 'GuardWideningImpl::parseRangeChecks'
428
Returning from 'GuardWideningImpl::parseRangeChecks'
429
Calling 'GuardWideningImpl::parseRangeChecks'
805
Returning from 'GuardWideningImpl::parseRangeChecks'
807
Taking true branch
451 combineRangeChecks(Checks, CombinedChecks)) {
806
Assuming the condition is true
452 if (InsertPt) {
808
Taking true branch
453 Result = nullptr;
809
Null pointer value stored to 'Result'
454 for (auto &RC : CombinedChecks) {
810
Assuming '__begin' is equal to '__end'
455 makeAvailableAt(RC.getCheckInst(), InsertPt);
456 if (Result)
457 Result = BinaryOperator::CreateAnd(RC.getCheckInst(), Result, "",
458 InsertPt);
459 else
460 Result = RC.getCheckInst();
461 }
462
463 Result->setName("wide.chk");
811
Calling constructor for 'Twine'
837
Returning from constructor for 'Twine'
838
Called C++ object pointer is null
464 }
465 return true;
466 }
467 }
468
469 // Base case -- just logical-and the two conditions together.
470
471 if (InsertPt) {
472 makeAvailableAt(Cond0, InsertPt);
473 makeAvailableAt(Cond1, InsertPt);
474
475 Result = BinaryOperator::CreateAnd(Cond0, Cond1, "wide.chk", InsertPt);
476 }
477
478 // We were not able to compute Cond0 AND Cond1 for the price of one.
479 return false;
480}
481
482bool GuardWideningImpl::parseRangeChecks(
483 Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
484 SmallPtrSetImpl<Value *> &Visited) {
485 if (!Visited.insert(CheckCond).second)
54
Assuming the condition is false
55
Taking false branch
431
Assuming the condition is false
432
Taking false branch
486 return true;
487
488 using namespace llvm::PatternMatch;
489
490 {
491 Value *AndLHS, *AndRHS;
492 if (match(CheckCond, m_And(m_Value(AndLHS), m_Value(AndRHS))))
56
Calling 'm_Value'
59
Returning from 'm_Value'
60
Calling 'm_Value'
63
Returning from 'm_Value'
64
Calling 'm_And'
67
Returning from 'm_And'
68
Calling 'match'
99
Returning from 'match'
100
Taking false branch
433
Calling 'm_Value'
436
Returning from 'm_Value'
437
Calling 'm_Value'
440
Returning from 'm_Value'
441
Calling 'm_And'
444
Returning from 'm_And'
445
Calling 'match'
476
Returning from 'match'
477
Taking false branch
493 return parseRangeChecks(AndLHS, Checks) &&
494 parseRangeChecks(AndRHS, Checks);
495 }
496
497 auto *IC = dyn_cast<ICmpInst>(CheckCond);
101
Calling 'dyn_cast'
156
Returning from 'dyn_cast'
478
Calling 'dyn_cast'
533
Returning from 'dyn_cast'
498 if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
157
Assuming 'IC' is non-null
158
Calling 'CmpInst::getOperand'
189
Returning from 'CmpInst::getOperand'
190
Calling 'Value::getType'
191
Returning from 'Value::getType'
192
Calling 'Type::isIntegerTy'
196
Returning from 'Type::isIntegerTy'
534
Assuming 'IC' is non-null
535
Calling 'CmpInst::getOperand'
566
Returning from 'CmpInst::getOperand'
567
Calling 'Value::getType'
568
Returning from 'Value::getType'
569
Calling 'Type::isIntegerTy'
573
Returning from 'Type::isIntegerTy'
499 (IC->getPredicate() != ICmpInst::ICMP_ULT &&
197
Calling 'CmpInst::getPredicate'
204
Returning from 'CmpInst::getPredicate'
205
Assuming the condition is false
574
Calling 'CmpInst::getPredicate'
581
Returning from 'CmpInst::getPredicate'
582
Assuming the condition is false
500 IC->getPredicate() != ICmpInst::ICMP_UGT))
501 return false;
502
503 Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
206
Calling 'CmpInst::getOperand'
236
Returning from 'CmpInst::getOperand'
237
Calling 'CmpInst::getOperand'
268
Returning from 'CmpInst::getOperand'
583
Calling 'CmpInst::getOperand'
613
Returning from 'CmpInst::getOperand'
614
Calling 'CmpInst::getOperand'
645
Returning from 'CmpInst::getOperand'
504 if (IC->getPredicate() == ICmpInst::ICMP_UGT)
269
Calling 'CmpInst::getPredicate'
276
Returning from 'CmpInst::getPredicate'
277
Taking false branch
646
Calling 'CmpInst::getPredicate'
653
Returning from 'CmpInst::getPredicate'
654
Taking false branch
505 std::swap(CmpLHS, CmpRHS);
506
507 auto &DL = IC->getModule()->getDataLayout();
278
Calling 'Instruction::getModule'
279
Returning from 'Instruction::getModule'
655
Calling 'Instruction::getModule'
656
Returning from 'Instruction::getModule'
508
509 GuardWideningImpl::RangeCheck Check(
305
Calling constructor for 'RangeCheck'
306
Returning from constructor for 'RangeCheck'
682
Calling constructor for 'RangeCheck'
683
Returning from constructor for 'RangeCheck'
510 CmpLHS, cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType())),
280
Calling 'Value::getType'
281
Returning from 'Value::getType'
282
Calling 'cast'
304
Returning from 'cast'
657
Calling 'Value::getType'
658
Returning from 'Value::getType'
659
Calling 'cast'
681
Returning from 'cast'
511 CmpRHS, IC);
512
513 if (!isKnownNonNegative(Check.getLength(), DL))
307
Calling 'RangeCheck::getLength'
308
Returning from 'RangeCheck::getLength'
309
Assuming the condition is false
310
Taking false branch
684
Calling 'RangeCheck::getLength'
685
Returning from 'RangeCheck::getLength'
686
Assuming the condition is false
687
Taking false branch
514 return false;
515
516 // What we have in \c Check now is a correct interpretation of \p CheckCond.
517 // Try to see if we can move some constant offsets into the \c Offset field.
518
519 bool Changed;
520 auto &Ctx = CheckCond->getContext();
521
522 do {
426
Loop condition is false. Exiting loop
803
Loop condition is false. Exiting loop
523 Value *OpLHS;
524 ConstantInt *OpRHS;
525 Changed = false;
526
527#ifndef NDEBUG
528 auto *BaseInst = dyn_cast<Instruction>(Check.getBase());
311
Calling 'RangeCheck::getBase'
312
Returning from 'RangeCheck::getBase'
313
Calling 'dyn_cast'
333
Returning from 'dyn_cast'
688
Calling 'RangeCheck::getBase'
689
Returning from 'RangeCheck::getBase'
690
Calling 'dyn_cast'
710
Returning from 'dyn_cast'
529 assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
334
Within the expansion of the macro 'assert':
711
Within the expansion of the macro 'assert':
530 "Unreachable instruction?");
531#endif
532
533 if (match(Check.getBase(), m_Add(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
335
Calling 'RangeCheck::getBase'
336
Returning from 'RangeCheck::getBase'
337
Calling 'm_Value'
340
Returning from 'm_Value'
341
Calling 'm_ConstantInt'
344
Returning from 'm_ConstantInt'
345
Calling 'm_Add'
348
Returning from 'm_Add'
349
Calling 'match'
379
Returning from 'match'
380
Taking false branch
712
Calling 'RangeCheck::getBase'
713
Returning from 'RangeCheck::getBase'
714
Calling 'm_Value'
717
Returning from 'm_Value'
718
Calling 'm_ConstantInt'
721
Returning from 'm_ConstantInt'
722
Calling 'm_Add'
725
Returning from 'm_Add'
726
Calling 'match'
756
Returning from 'match'
757
Taking false branch
534 Check.setBase(OpLHS);
535 APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
536 Check.setOffset(ConstantInt::get(Ctx, NewOffset));
537 Changed = true;
538 } else if (match(Check.getBase(),
381
Calling 'RangeCheck::getBase'
382
Returning from 'RangeCheck::getBase'
395
Calling 'match'
424
Returning from 'match'
425
Taking false branch
758
Calling 'RangeCheck::getBase'
759
Returning from 'RangeCheck::getBase'
772
Calling 'match'
801
Returning from 'match'
802
Taking false branch
539 m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
383
Calling 'm_Value'
386
Returning from 'm_Value'
387
Calling 'm_ConstantInt'
390
Returning from 'm_ConstantInt'
391
Calling 'm_Or'
394
Returning from 'm_Or'
760
Calling 'm_Value'
763
Returning from 'm_Value'
764
Calling 'm_ConstantInt'
767
Returning from 'm_ConstantInt'
768
Calling 'm_Or'
771
Returning from 'm_Or'
540 KnownBits Known = computeKnownBits(OpLHS, DL);
541 if ((OpRHS->getValue() & Known.Zero) == OpRHS->getValue()) {
542 Check.setBase(OpLHS);
543 APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
544 Check.setOffset(ConstantInt::get(Ctx, NewOffset));
545 Changed = true;
546 }
547 }
548 } while (Changed);
549
550 Checks.push_back(Check);
551 return true;
552}
553
554bool GuardWideningImpl::combineRangeChecks(
555 SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
556 SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) {
557 unsigned OldCount = Checks.size();
558 while (!Checks.empty()) {
559 // Pick all of the range checks with a specific base and length, and try to
560 // merge them.
561 Value *CurrentBase = Checks.front().getBase();
562 Value *CurrentLength = Checks.front().getLength();
563
564 SmallVector<GuardWideningImpl::RangeCheck, 3> CurrentChecks;
565
566 auto IsCurrentCheck = [&](GuardWideningImpl::RangeCheck &RC) {
567 return RC.getBase() == CurrentBase && RC.getLength() == CurrentLength;
568 };
569
570 copy_if(Checks, std::back_inserter(CurrentChecks), IsCurrentCheck);
571 Checks.erase(remove_if(Checks, IsCurrentCheck), Checks.end());
572
573 assert(CurrentChecks.size() != 0 && "We know we have at least one!");
574
575 if (CurrentChecks.size() < 3) {
576 RangeChecksOut.insert(RangeChecksOut.end(), CurrentChecks.begin(),
577 CurrentChecks.end());
578 continue;
579 }
580
581 // CurrentChecks.size() will typically be 3 here, but so far there has been
582 // no need to hard-code that fact.
583
584 std::sort(CurrentChecks.begin(), CurrentChecks.end(),
585 [&](const GuardWideningImpl::RangeCheck &LHS,
586 const GuardWideningImpl::RangeCheck &RHS) {
587 return LHS.getOffsetValue().slt(RHS.getOffsetValue());
588 });
589
590 // Note: std::sort should not invalidate the ChecksStart iterator.
591
592 ConstantInt *MinOffset = CurrentChecks.front().getOffset(),
593 *MaxOffset = CurrentChecks.back().getOffset();
594
595 unsigned BitWidth = MaxOffset->getValue().getBitWidth();
596 if ((MaxOffset->getValue() - MinOffset->getValue())
597 .ugt(APInt::getSignedMinValue(BitWidth)))
598 return false;
599
600 APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
601 const APInt &HighOffset = MaxOffset->getValue();
602 auto OffsetOK = [&](const GuardWideningImpl::RangeCheck &RC) {
603 return (HighOffset - RC.getOffsetValue()).ult(MaxDiff);
604 };
605
606 if (MaxDiff.isMinValue() ||
607 !std::all_of(std::next(CurrentChecks.begin()), CurrentChecks.end(),
608 OffsetOK))
609 return false;
610
611 // We have a series of f+1 checks as:
612 //
613 // I+k_0 u< L ... Chk_0
614 // I+k_1 u< L ... Chk_1
615 // ...
616 // I+k_f u< L ... Chk_f
617 //
618 // with forall i in [0,f]: k_f-k_i u< k_f-k_0 ... Precond_0
619 // k_f-k_0 u< INT_MIN+k_f ... Precond_1
620 // k_f != k_0 ... Precond_2
621 //
622 // Claim:
623 // Chk_0 AND Chk_f implies all the other checks
624 //
625 // Informal proof sketch:
626 //
627 // We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
628 // (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
629 // thus I+k_f is the greatest unsigned value in that range.
630 //
631 // This, combined with Chk_f, shows that everything in that range is u< L.
632 // Via Precond_0 we know that all of the indices in Chk_0 through Chk_f
633 // lie in [I+k_0,I+k_f], thus proving our claim.
634 //
635 // To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
636 // two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
637 // since k_0 != k_f). In the former case, [I+k_0,I+k_f] is not a wrapping
638 // range by definition, and the latter case is impossible:
639 //
640 // 0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
641 // xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
642 //
643 // For Chk_0 to succeed, we'd have to have k_f-k_0 (the range highlighted
644 // with 'x' above) to be at least >u INT_MIN.
645
646 RangeChecksOut.emplace_back(CurrentChecks.front());
647 RangeChecksOut.emplace_back(CurrentChecks.back());
648 }
649
650 assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
651 return RangeChecksOut.size() != OldCount;
652}
653
654PreservedAnalyses GuardWideningPass::run(Function &F,
655 FunctionAnalysisManager &AM) {
656 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
657 auto &LI = AM.getResult<LoopAnalysis>(F);
658 auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
659 if (!GuardWideningImpl(DT, PDT, LI).run())
660 return PreservedAnalyses::all();
661
662 PreservedAnalyses PA;
663 PA.preserveSet<CFGAnalyses>();
664 return PA;
665}
666
667#ifndef NDEBUG
668StringRef GuardWideningImpl::scoreTypeToString(WideningScore WS) {
669 switch (WS) {
670 case WS_IllegalOrNegative:
671 return "IllegalOrNegative";
672 case WS_Neutral:
673 return "Neutral";
674 case WS_Positive:
675 return "Positive";
676 case WS_VeryPositive:
677 return "VeryPositive";
678 }
679
680 llvm_unreachable("Fully covered switch above!");
681}
682#endif
683
684char GuardWideningLegacyPass::ID = 0;
685
686INITIALIZE_PASS_BEGIN(GuardWideningLegacyPass, "guard-widening", "Widen guards",
687 false, false)
688INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
689INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
690INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
691INITIALIZE_PASS_END(GuardWideningLegacyPass, "guard-widening", "Widen guards",
692 false, false)
693
694FunctionPass *llvm::createGuardWideningPass() {
695 return new GuardWideningLegacyPass();
696}

/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file provides a simple and efficient mechanism for performing general
11// tree-based pattern matches on the LLVM IR. The power of these routines is
12// that it allows you to write concise patterns that are expressive and easy to
13// understand. The other major advantage of this is that it allows you to
14// trivially capture/bind elements in the pattern to variables. For example,
15// you can do something like this:
16//
17// Value *Exp = ...
18// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
19// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
20// m_And(m_Value(Y), m_ConstantInt(C2))))) {
21// ... Pattern is matched and variables are bound ...
22// }
23//
24// This is primarily useful to things like the instruction combiner, but can
25// also be useful for static analysis tools or code generators.
26//
27//===----------------------------------------------------------------------===//
28
29#ifndef LLVM_IR_PATTERNMATCH_H
30#define LLVM_IR_PATTERNMATCH_H
31
32#include "llvm/ADT/APFloat.h"
33#include "llvm/ADT/APInt.h"
34#include "llvm/IR/CallSite.h"
35#include "llvm/IR/Constant.h"
36#include "llvm/IR/Constants.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Instruction.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include <cstdint>
45
46namespace llvm {
47namespace PatternMatch {
48
49template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
50 return const_cast<Pattern &>(P).match(V);
16
Calling 'CmpClass_match::match'
49
Returning from 'CmpClass_match::match'
69
Calling 'BinaryOp_match::match'
98
Returning from 'BinaryOp_match::match'
350
Calling 'BinaryOp_match::match'
378
Returning from 'BinaryOp_match::match'
396
Calling 'BinaryOp_match::match'
423
Returning from 'BinaryOp_match::match'
446
Calling 'BinaryOp_match::match'
475
Returning from 'BinaryOp_match::match'
727
Calling 'BinaryOp_match::match'
755
Returning from 'BinaryOp_match::match'
773
Calling 'BinaryOp_match::match'
800
Returning from 'BinaryOp_match::match'
51}
52
53template <typename SubPattern_t> struct OneUse_match {
54 SubPattern_t SubPattern;
55
56 OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
57
58 template <typename OpTy> bool match(OpTy *V) {
59 return V->hasOneUse() && SubPattern.match(V);
60 }
61};
62
63template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
64 return SubPattern;
65}
66
67template <typename Class> struct class_match {
68 template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
69};
70
71/// \brief Match an arbitrary value and ignore it.
72inline class_match<Value> m_Value() { return class_match<Value>(); }
73
74/// \brief Match an arbitrary binary operation and ignore it.
75inline class_match<BinaryOperator> m_BinOp() {
76 return class_match<BinaryOperator>();
77}
78
79/// \brief Matches any compare instruction and ignore it.
80inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
81
82/// \brief Match an arbitrary ConstantInt and ignore it.
83inline class_match<ConstantInt> m_ConstantInt() {
84 return class_match<ConstantInt>();
85}
86
87/// \brief Match an arbitrary undef constant.
88inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
89
90/// \brief Match an arbitrary Constant and ignore it.
91inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
92
93/// Matching combinators
94template <typename LTy, typename RTy> struct match_combine_or {
95 LTy L;
96 RTy R;
97
98 match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
99
100 template <typename ITy> bool match(ITy *V) {
101 if (L.match(V))
102 return true;
103 if (R.match(V))
104 return true;
105 return false;
106 }
107};
108
109template <typename LTy, typename RTy> struct match_combine_and {
110 LTy L;
111 RTy R;
112
113 match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
114
115 template <typename ITy> bool match(ITy *V) {
116 if (L.match(V))
117 if (R.match(V))
118 return true;
119 return false;
120 }
121};
122
123/// Combine two pattern matchers matching L || R
124template <typename LTy, typename RTy>
125inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
126 return match_combine_or<LTy, RTy>(L, R);
127}
128
129/// Combine two pattern matchers matching L && R
130template <typename LTy, typename RTy>
131inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
132 return match_combine_and<LTy, RTy>(L, R);
133}
134
135struct match_zero {
136 template <typename ITy> bool match(ITy *V) {
137 if (const auto *C = dyn_cast<Constant>(V))
138 return C->isNullValue();
139 return false;
140 }
141};
142
143/// \brief Match an arbitrary zero/null constant. This includes
144/// zero_initializer for vectors and ConstantPointerNull for pointers.
145inline match_zero m_Zero() { return match_zero(); }
146
147struct match_neg_zero {
148 template <typename ITy> bool match(ITy *V) {
149 if (const auto *C = dyn_cast<Constant>(V))
150 return C->isNegativeZeroValue();
151 return false;
152 }
153};
154
155/// \brief Match an arbitrary zero/null constant. This includes
156/// zero_initializer for vectors and ConstantPointerNull for pointers. For
157/// floating point constants, this will match negative zero but not positive
158/// zero
159inline match_neg_zero m_NegZero() { return match_neg_zero(); }
160
161struct match_any_zero {
162 template <typename ITy> bool match(ITy *V) {
163 if (const auto *C = dyn_cast<Constant>(V))
164 return C->isZeroValue();
165 return false;
166 }
167};
168
169/// \brief - Match an arbitrary zero/null constant. This includes
170/// zero_initializer for vectors and ConstantPointerNull for pointers. For
171/// floating point constants, this will match negative zero and positive zero
172inline match_any_zero m_AnyZero() { return match_any_zero(); }
173
174struct match_nan {
175 template <typename ITy> bool match(ITy *V) {
176 if (const auto *C = dyn_cast<ConstantFP>(V))
177 return C->isNaN();
178 return false;
179 }
180};
181
182/// Match an arbitrary NaN constant. This includes quiet and signalling nans.
183inline match_nan m_NaN() { return match_nan(); }
184
185struct match_one {
186 template <typename ITy> bool match(ITy *V) {
187 if (const auto *C = dyn_cast<Constant>(V))
188 return C->isOneValue();
189 return false;
190 }
191};
192
193/// \brief Match an integer 1 or a vector with all elements equal to 1.
194inline match_one m_One() { return match_one(); }
195
196struct match_all_ones {
197 template <typename ITy> bool match(ITy *V) {
198 if (const auto *C = dyn_cast<Constant>(V))
199 return C->isAllOnesValue();
200 return false;
201 }
202};
203
204/// \brief Match an integer or vector with all bits set to true.
205inline match_all_ones m_AllOnes() { return match_all_ones(); }
206
207struct match_sign_mask {
208 template <typename ITy> bool match(ITy *V) {
209 if (const auto *C = dyn_cast<Constant>(V))
210 return C->isMinSignedValue();
211 return false;
212 }
213};
214
215/// \brief Match an integer or vector with only the sign bit(s) set.
216inline match_sign_mask m_SignMask() { return match_sign_mask(); }
217
218struct apint_match {
219 const APInt *&Res;
220
221 apint_match(const APInt *&R) : Res(R) {}
222
223 template <typename ITy> bool match(ITy *V) {
224 if (auto *CI = dyn_cast<ConstantInt>(V)) {
225 Res = &CI->getValue();
226 return true;
227 }
228 if (V->getType()->isVectorTy())
229 if (const auto *C = dyn_cast<Constant>(V))
230 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
231 Res = &CI->getValue();
232 return true;
233 }
234 return false;
235 }
236};
237// Either constexpr if or renaming ConstantFP::getValueAPF to
238// ConstantFP::getValue is needed to do it via single template
239// function for both apint/apfloat.
240struct apfloat_match {
241 const APFloat *&Res;
242 apfloat_match(const APFloat *&R) : Res(R) {}
243 template <typename ITy> bool match(ITy *V) {
244 if (auto *CI = dyn_cast<ConstantFP>(V)) {
245 Res = &CI->getValueAPF();
246 return true;
247 }
248 if (V->getType()->isVectorTy())
249 if (const auto *C = dyn_cast<Constant>(V))
250 if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) {
251 Res = &CI->getValueAPF();
252 return true;
253 }
254 return false;
255 }
256};
257
258/// \brief Match a ConstantInt or splatted ConstantVector, binding the
259/// specified pointer to the contained APInt.
260inline apint_match m_APInt(const APInt *&Res) { return Res; }
261
262/// \brief Match a ConstantFP or splatted ConstantVector, binding the
263/// specified pointer to the contained APFloat.
264inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; }
265
266template <int64_t Val> struct constantint_match {
267 template <typename ITy> bool match(ITy *V) {
268 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
269 const APInt &CIV = CI->getValue();
270 if (Val >= 0)
271 return CIV == static_cast<uint64_t>(Val);
272 // If Val is negative, and CI is shorter than it, truncate to the right
273 // number of bits. If it is larger, then we have to sign extend. Just
274 // compare their negated values.
275 return -CIV == -Val;
276 }
277 return false;
278 }
279};
280
281/// \brief Match a ConstantInt with a specific value.
282template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
283 return constantint_match<Val>();
284}
285
286/// \brief This helper class is used to match scalar and vector constants that
287/// satisfy a specified predicate.
288template <typename Predicate> struct cst_pred_ty : public Predicate {
289 template <typename ITy> bool match(ITy *V) {
290 if (const auto *CI = dyn_cast<ConstantInt>(V))
291 return this->isValue(CI->getValue());
292 if (V->getType()->isVectorTy())
293 if (const auto *C = dyn_cast<Constant>(V))
294 if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
295 return this->isValue(CI->getValue());
296 return false;
297 }
298};
299
300/// \brief This helper class is used to match scalar and vector constants that
301/// satisfy a specified predicate, and bind them to an APInt.
302template <typename Predicate> struct api_pred_ty : public Predicate {
303 const APInt *&Res;
304
305 api_pred_ty(const APInt *&R) : Res(R) {}
306
307 template <typename ITy> bool match(ITy *V) {
308 if (const auto *CI = dyn_cast<ConstantInt>(V))
309 if (this->isValue(CI->getValue())) {
310 Res = &CI->getValue();
311 return true;
312 }
313 if (V->getType()->isVectorTy())
314 if (const auto *C = dyn_cast<Constant>(V))
315 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
316 if (this->isValue(CI->getValue())) {
317 Res = &CI->getValue();
318 return true;
319 }
320
321 return false;
322 }
323};
324
325struct is_power2 {
326 bool isValue(const APInt &C) { return C.isPowerOf2(); }
327};
328
329/// \brief Match an integer or vector power of 2.
330inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
331inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }
332
333struct is_maxsignedvalue {
334 bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
335};
336
337inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() { return cst_pred_ty<is_maxsignedvalue>(); }
338inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) { return V; }
339
340template <typename Class> struct bind_ty {
341 Class *&VR;
342
343 bind_ty(Class *&V) : VR(V) {}
344
345 template <typename ITy> bool match(ITy *V) {
346 if (auto *CV = dyn_cast<Class>(V)) {
347 VR = CV;
348 return true;
349 }
350 return false;
351 }
352};
353
354/// \brief Match a value, capturing it if we match.
355inline bind_ty<Value> m_Value(Value *&V) { return V; }
4
Calling constructor for 'bind_ty'
5
Returning from constructor for 'bind_ty'
57
Calling constructor for 'bind_ty'
58
Returning from constructor for 'bind_ty'
61
Calling constructor for 'bind_ty'
62
Returning from constructor for 'bind_ty'
338
Calling constructor for 'bind_ty'
339
Returning from constructor for 'bind_ty'
384
Calling constructor for 'bind_ty'
385
Returning from constructor for 'bind_ty'
434
Calling constructor for 'bind_ty'
435
Returning from constructor for 'bind_ty'
438
Calling constructor for 'bind_ty'
439
Returning from constructor for 'bind_ty'
715
Calling constructor for 'bind_ty'
716
Returning from constructor for 'bind_ty'
761
Calling constructor for 'bind_ty'
762
Returning from constructor for 'bind_ty'
356inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
357
358/// \brief Match an instruction, capturing it if we match.
359inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
360/// \brief Match a binary operator, capturing it if we match.
361inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
362
363/// \brief Match a ConstantInt, capturing the value if we match.
364inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
8
Calling constructor for 'bind_ty'
9
Returning from constructor for 'bind_ty'
342
Calling constructor for 'bind_ty'
343
Returning from constructor for 'bind_ty'
388
Calling constructor for 'bind_ty'
389
Returning from constructor for 'bind_ty'
719
Calling constructor for 'bind_ty'
720
Returning from constructor for 'bind_ty'
765
Calling constructor for 'bind_ty'
766
Returning from constructor for 'bind_ty'
365
366/// \brief Match a Constant, capturing the value if we match.
367inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
368
369/// \brief Match a ConstantFP, capturing the value if we match.
370inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
371
372/// \brief Match a specified Value*.
373struct specificval_ty {
374 const Value *Val;
375
376 specificval_ty(const Value *V) : Val(V) {}
377
378 template <typename ITy> bool match(ITy *V) { return V == Val; }
379};
380
381/// \brief Match if we have a specific specified value.
382inline specificval_ty m_Specific(const Value *V) { return V; }
383
384/// \brief Match a specified floating point value or vector of all elements of
385/// that value.
386struct specific_fpval {
387 double Val;
388
389 specific_fpval(double V) : Val(V) {}
390
391 template <typename ITy> bool match(ITy *V) {
392 if (const auto *CFP = dyn_cast<ConstantFP>(V))
393 return CFP->isExactlyValue(Val);
394 if (V->getType()->isVectorTy())
395 if (const auto *C = dyn_cast<Constant>(V))
396 if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
397 return CFP->isExactlyValue(Val);
398 return false;
399 }
400};
401
402/// \brief Match a specific floating point value or vector with all elements
403/// equal to the value.
404inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
405
406/// \brief Match a float 1.0 or vector with all elements equal to 1.0.
407inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
408
409struct bind_const_intval_ty {
410 uint64_t &VR;
411
412 bind_const_intval_ty(uint64_t &V) : VR(V) {}
413
414 template <typename ITy> bool match(ITy *V) {
415 if (const auto *CV = dyn_cast<ConstantInt>(V))
416 if (CV->getValue().ule(UINT64_MAX)) {
417 VR = CV->getZExtValue();
418 return true;
419 }
420 return false;
421 }
422};
423
424/// \brief Match a specified integer value or vector of all elements of that
425// value.
426struct specific_intval {
427 uint64_t Val;
428
429 specific_intval(uint64_t V) : Val(V) {}
430
431 template <typename ITy> bool match(ITy *V) {
432 const auto *CI = dyn_cast<ConstantInt>(V);
433 if (!CI && V->getType()->isVectorTy())
434 if (const auto *C = dyn_cast<Constant>(V))
435 CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
436
437 return CI && CI->getValue() == Val;
438 }
439};
440
441/// \brief Match a specific integer value or vector with all elements equal to
442/// the value.
443inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }
444
445/// \brief Match a ConstantInt and bind to its value. This does not match
446/// ConstantInts wider than 64-bits.
447inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
448
449//===----------------------------------------------------------------------===//
450// Matcher for any binary operator.
451//
452template <typename LHS_t, typename RHS_t, bool Commutable = false>
453struct AnyBinaryOp_match {
454 LHS_t L;
455 RHS_t R;
456
457 AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
458
459 template <typename OpTy> bool match(OpTy *V) {
460 if (auto *I = dyn_cast<BinaryOperator>(V))
461 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
462 (Commutable && R.match(I->getOperand(0)) &&
463 L.match(I->getOperand(1)));
464 return false;
465 }
466};
467
468template <typename LHS, typename RHS>
469inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
470 return AnyBinaryOp_match<LHS, RHS>(L, R);
471}
472
473//===----------------------------------------------------------------------===//
474// Matchers for specific binary operators.
475//
476
477template <typename LHS_t, typename RHS_t, unsigned Opcode,
478 bool Commutable = false>
479struct BinaryOp_match {
480 LHS_t L;
481 RHS_t R;
482
483 BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
484
485 template <typename OpTy> bool match(OpTy *V) {
486 if (V->getValueID() == Value::InstructionVal + Opcode) {
Analyzer steps 70-73, 351-353, 397-399, 447-450, 728-730, 774-776: Calling/Returning from 'Value::getValueID'; assuming the condition is false (steps 72, 449); taking the false branch
487 auto *I = cast<BinaryOperator>(V);
488 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
489 (Commutable && R.match(I->getOperand(0)) &&
490 L.match(I->getOperand(1)));
491 }
492 if (auto *CE = dyn_cast<ConstantExpr>(V))
Analyzer steps 74-97, 354-377, 400-422, 451-474, 731-754, 777-799: Calling/Returning from 'dyn_cast'; taking the false branch
493 return CE->getOpcode() == Opcode &&
494 ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
495 (Commutable && R.match(CE->getOperand(0)) &&
496 L.match(CE->getOperand(1))));
497 return false;
498 }
499};
500
501template <typename LHS, typename RHS>
502inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
503 const RHS &R) {
504 return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
Analyzer steps 346-347, 723-724: Calling/Returning from constructor for 'BinaryOp_match'
505}
506
507template <typename LHS, typename RHS>
508inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
509 const RHS &R) {
510 return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
511}
512
513template <typename LHS, typename RHS>
514inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
515 const RHS &R) {
516 return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
517}
518
519template <typename LHS, typename RHS>
520inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
521 const RHS &R) {
522 return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
523}
524
525template <typename LHS, typename RHS>
526inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
527 const RHS &R) {
528 return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
529}
530
531template <typename LHS, typename RHS>
532inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
533 const RHS &R) {
534 return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
535}
536
537template <typename LHS, typename RHS>
538inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
539 const RHS &R) {
540 return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
541}
542
543template <typename LHS, typename RHS>
544inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
545 const RHS &R) {
546 return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
547}
548
549template <typename LHS, typename RHS>
550inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
551 const RHS &R) {
552 return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
553}
554
555template <typename LHS, typename RHS>
556inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
557 const RHS &R) {
558 return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
559}
560
561template <typename LHS, typename RHS>
562inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
563 const RHS &R) {
564 return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
565}
566
567template <typename LHS, typename RHS>
568inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
569 const RHS &R) {
570 return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
571}
572
573template <typename LHS, typename RHS>
574inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
575 const RHS &R) {
576 return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
Analyzer steps 65-66, 442-443: Calling/Returning from constructor for 'BinaryOp_match'
577}
578
579template <typename LHS, typename RHS>
580inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
581 const RHS &R) {
582 return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
Analyzer steps 392-393, 769-770: Calling/Returning from constructor for 'BinaryOp_match'
583}
584
585template <typename LHS, typename RHS>
586inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
587 const RHS &R) {
588 return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
589}
590
591template <typename LHS, typename RHS>
592inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
593 const RHS &R) {
594 return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
595}
596
597template <typename LHS, typename RHS>
598inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
599 const RHS &R) {
600 return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
601}
602
603template <typename LHS, typename RHS>
604inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
605 const RHS &R) {
606 return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
607}
608
609template <typename LHS_t, typename RHS_t, unsigned Opcode,
610 unsigned WrapFlags = 0>
611struct OverflowingBinaryOp_match {
612 LHS_t L;
613 RHS_t R;
614
615 OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
616 : L(LHS), R(RHS) {}
617
618 template <typename OpTy> bool match(OpTy *V) {
619 if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
620 if (Op->getOpcode() != Opcode)
621 return false;
622 if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
623 !Op->hasNoUnsignedWrap())
624 return false;
625 if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
626 !Op->hasNoSignedWrap())
627 return false;
628 return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
629 }
630 return false;
631 }
632};
633
634template <typename LHS, typename RHS>
635inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
636 OverflowingBinaryOperator::NoSignedWrap>
637m_NSWAdd(const LHS &L, const RHS &R) {
638 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
639 OverflowingBinaryOperator::NoSignedWrap>(
640 L, R);
641}
642template <typename LHS, typename RHS>
643inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
644 OverflowingBinaryOperator::NoSignedWrap>
645m_NSWSub(const LHS &L, const RHS &R) {
646 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
647 OverflowingBinaryOperator::NoSignedWrap>(
648 L, R);
649}
650template <typename LHS, typename RHS>
651inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
652 OverflowingBinaryOperator::NoSignedWrap>
653m_NSWMul(const LHS &L, const RHS &R) {
654 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
655 OverflowingBinaryOperator::NoSignedWrap>(
656 L, R);
657}
658template <typename LHS, typename RHS>
659inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
660 OverflowingBinaryOperator::NoSignedWrap>
661m_NSWShl(const LHS &L, const RHS &R) {
662 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
663 OverflowingBinaryOperator::NoSignedWrap>(
664 L, R);
665}
666
667template <typename LHS, typename RHS>
668inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
669 OverflowingBinaryOperator::NoUnsignedWrap>
670m_NUWAdd(const LHS &L, const RHS &R) {
671 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
672 OverflowingBinaryOperator::NoUnsignedWrap>(
673 L, R);
674}
675template <typename LHS, typename RHS>
676inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
677 OverflowingBinaryOperator::NoUnsignedWrap>
678m_NUWSub(const LHS &L, const RHS &R) {
679 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
680 OverflowingBinaryOperator::NoUnsignedWrap>(
681 L, R);
682}
683template <typename LHS, typename RHS>
684inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
685 OverflowingBinaryOperator::NoUnsignedWrap>
686m_NUWMul(const LHS &L, const RHS &R) {
687 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
688 OverflowingBinaryOperator::NoUnsignedWrap>(
689 L, R);
690}
691template <typename LHS, typename RHS>
692inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
693 OverflowingBinaryOperator::NoUnsignedWrap>
694m_NUWShl(const LHS &L, const RHS &R) {
695 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
696 OverflowingBinaryOperator::NoUnsignedWrap>(
697 L, R);
698}
699
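A small sketch of what the wrap-flag variants add over the plain matchers (illustrative helper name; same setup as the earlier sketches):

// Matches only 'add nsw A, B': OverflowingBinaryOp_match also requires
// hasNoSignedWrap(), so a plain 'add' (or an 'add nuw' only) is rejected.
static bool isNSWAdd(Value *V, Value *&A, Value *&B) {
  return match(V, m_NSWAdd(m_Value(A), m_Value(B)));
}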
700//===----------------------------------------------------------------------===//
701// Class that matches a group of binary opcodes.
702//
703template <typename LHS_t, typename RHS_t, typename Predicate>
704struct BinOpPred_match : Predicate {
705 LHS_t L;
706 RHS_t R;
707
708 BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
709
710 template <typename OpTy> bool match(OpTy *V) {
711 if (auto *I = dyn_cast<Instruction>(V))
712 return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
713 R.match(I->getOperand(1));
714 if (auto *CE = dyn_cast<ConstantExpr>(V))
715 return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
716 R.match(CE->getOperand(1));
717 return false;
718 }
719};
720
721struct is_shift_op {
722 bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
723};
724
725struct is_right_shift_op {
726 bool isOpType(unsigned Opcode) {
727 return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
728 }
729};
730
731struct is_logical_shift_op {
732 bool isOpType(unsigned Opcode) {
733 return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
734 }
735};
736
737struct is_bitwiselogic_op {
738 bool isOpType(unsigned Opcode) {
739 return Instruction::isBitwiseLogicOp(Opcode);
740 }
741};
742
743struct is_idiv_op {
744 bool isOpType(unsigned Opcode) {
745 return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
746 }
747};
748
749/// \brief Matches shift operations.
750template <typename LHS, typename RHS>
751inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
752 const RHS &R) {
753 return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
754}
755
756/// \brief Matches right shift operations (lshr or ashr).
757template <typename LHS, typename RHS>
758inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
759 const RHS &R) {
760 return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
761}
762
763/// \brief Matches logical shift operations.
764template <typename LHS, typename RHS>
765inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
766m_LogicalShift(const LHS &L, const RHS &R) {
767 return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
768}
769
770/// \brief Matches bitwise logic operations.
771template <typename LHS, typename RHS>
772inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
773m_BitwiseLogic(const LHS &L, const RHS &R) {
774 return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
775}
776
777/// \brief Matches integer division operations.
778template <typename LHS, typename RHS>
779inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
780 const RHS &R) {
781 return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
782}
783
784//===----------------------------------------------------------------------===//
785// Class that matches exact binary ops.
786//
787template <typename SubPattern_t> struct Exact_match {
788 SubPattern_t SubPattern;
789
790 Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
791
792 template <typename OpTy> bool match(OpTy *V) {
793 if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
794 return PEO->isExact() && SubPattern.match(V);
795 return false;
796 }
797};
798
799template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
800 return SubPattern;
801}
802
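A short usage sketch, with an illustrative helper name and the same setup as the earlier sketches:

// Matches 'sdiv exact A, B' but not a plain sdiv: Exact_match first checks
// PossiblyExactOperator::isExact() and only then runs the sub-pattern.
static bool isExactSDiv(Value *V, Value *&A, Value *&B) {
  return match(V, m_Exact(m_SDiv(m_Value(A), m_Value(B))));
}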
803//===----------------------------------------------------------------------===//
804// Matchers for CmpInst classes
805//
806
807template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
808 bool Commutable = false>
809struct CmpClass_match {
810 PredicateTy &Predicate;
811 LHS_t L;
812 RHS_t R;
813
814 CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
815 : Predicate(Pred), L(LHS), R(RHS) {}
816
817 template <typename OpTy> bool match(OpTy *V) {
818 if (auto *I = dyn_cast<Class>(V))
Analyzer steps 17, 47, 48: Calling/Returning from 'dyn_cast'; taking the false branch
819 if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
820 (Commutable && R.match(I->getOperand(0)) &&
821 L.match(I->getOperand(1)))) {
822 Predicate = I->getPredicate();
823 return true;
824 }
825 return false;
826 }
827};
828
829template <typename LHS, typename RHS>
830inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
831m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
832 return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
833}
834
835template <typename LHS, typename RHS>
836inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
837m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
838 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
Analyzer steps 12-13: Calling/Returning from constructor for 'CmpClass_match'
839}
840
841template <typename LHS, typename RHS>
842inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
843m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
844 return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
845}
846
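A usage sketch for the predicate-binding compare matchers (illustrative helper name; same setup as the earlier sketches):

// Detect 'icmp eq A, B'. On success the matcher writes the predicate into
// Pred, so callers can accept any compare and filter on the predicate after.
static bool isEqualityCompare(Value *V, Value *&A, Value *&B) {
  ICmpInst::Predicate Pred;
  return match(V, m_ICmp(Pred, m_Value(A), m_Value(B))) &&
         Pred == ICmpInst::ICMP_EQ;
}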
847//===----------------------------------------------------------------------===//
848// Matchers for SelectInst classes
849//
850
851template <typename Cond_t, typename LHS_t, typename RHS_t>
852struct SelectClass_match {
853 Cond_t C;
854 LHS_t L;
855 RHS_t R;
856
857 SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
858 : C(Cond), L(LHS), R(RHS) {}
859
860 template <typename OpTy> bool match(OpTy *V) {
861 if (auto *I = dyn_cast<SelectInst>(V))
862 return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
863 R.match(I->getOperand(2));
864 return false;
865 }
866};
867
868template <typename Cond, typename LHS, typename RHS>
869inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
870 const RHS &R) {
871 return SelectClass_match<Cond, LHS, RHS>(C, L, R);
872}
873
874/// \brief This matches a select of two constants, e.g.:
875/// m_SelectCst<-1, 0>(m_Value(V))
876template <int64_t L, int64_t R, typename Cond>
877inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
878m_SelectCst(const Cond &C) {
879 return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
880}
881
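A sketch of the select matchers in use; the sign-mask idiom below is just an example, and m_ConstantInt<L>() here is the constant-template form defined earlier in this header:

// Recognize 'select i1 C, i32 -1, i32 0' (bool-to-all-ones mask) and bind C.
static bool isBoolToAllOnesMask(Value *V, Value *&C) {
  return match(V, m_SelectCst<-1, 0>(m_Value(C)));
}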
882//===----------------------------------------------------------------------===//
883// Matchers for CastInst classes
884//
885
886template <typename Op_t, unsigned Opcode> struct CastClass_match {
887 Op_t Op;
888
889 CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
890
891 template <typename OpTy> bool match(OpTy *V) {
892 if (auto *O = dyn_cast<Operator>(V))
893 return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
894 return false;
895 }
896};
897
898/// \brief Matches BitCast.
899template <typename OpTy>
900inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
901 return CastClass_match<OpTy, Instruction::BitCast>(Op);
902}
903
904/// \brief Matches PtrToInt.
905template <typename OpTy>
906inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
907 return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
908}
909
910/// \brief Matches Trunc.
911template <typename OpTy>
912inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
913 return CastClass_match<OpTy, Instruction::Trunc>(Op);
914}
915
916/// \brief Matches SExt.
917template <typename OpTy>
918inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
919 return CastClass_match<OpTy, Instruction::SExt>(Op);
920}
921
922/// \brief Matches ZExt.
923template <typename OpTy>
924inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
925 return CastClass_match<OpTy, Instruction::ZExt>(Op);
926}
927
928template <typename OpTy>
929inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
930 CastClass_match<OpTy, Instruction::SExt>>
931m_ZExtOrSExt(const OpTy &Op) {
932 return m_CombineOr(m_ZExt(Op), m_SExt(Op));
933}
934
935/// \brief Matches UIToFP.
936template <typename OpTy>
937inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
938 return CastClass_match<OpTy, Instruction::UIToFP>(Op);
939}
940
941/// \brief Matches SIToFP.
942template <typename OpTy>
943inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
944 return CastClass_match<OpTy, Instruction::SIToFP>(Op);
945}
946
947/// \brief Matches FPTrunc
948template <typename OpTy>
949inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
950 return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
951}
952
953/// \brief Matches FPExt
954template <typename OpTy>
955inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
956 return CastClass_match<OpTy, Instruction::FPExt>(Op);
957}
958
959//===----------------------------------------------------------------------===//
960// Matchers for unary operators
961//
962
963template <typename LHS_t> struct not_match {
964 LHS_t L;
965
966 not_match(const LHS_t &LHS) : L(LHS) {}
967
968 template <typename OpTy> bool match(OpTy *V) {
969 if (auto *O = dyn_cast<Operator>(V))
970 if (O->getOpcode() == Instruction::Xor) {
971 if (isAllOnes(O->getOperand(1)))
972 return L.match(O->getOperand(0));
973 if (isAllOnes(O->getOperand(0)))
974 return L.match(O->getOperand(1));
975 }
976 return false;
977 }
978
979private:
980 bool isAllOnes(Value *V) {
981 return isa<Constant>(V) && cast<Constant>(V)->isAllOnesValue();
982 }
983};
984
985template <typename LHS> inline not_match<LHS> m_Not(const LHS &L) { return L; }
986
987template <typename LHS_t> struct neg_match {
988 LHS_t L;
989
990 neg_match(const LHS_t &LHS) : L(LHS) {}
991
992 template <typename OpTy> bool match(OpTy *V) {
993 if (auto *O = dyn_cast<Operator>(V))
994 if (O->getOpcode() == Instruction::Sub)
995 return matchIfNeg(O->getOperand(0), O->getOperand(1));
996 return false;
997 }
998
999private:
1000 bool matchIfNeg(Value *LHS, Value *RHS) {
1001 return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
1002 isa<ConstantAggregateZero>(LHS)) &&
1003 L.match(RHS);
1004 }
1005};
1006
1007/// \brief Match an integer negate.
1008template <typename LHS> inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
1009
1010template <typename LHS_t> struct fneg_match {
1011 LHS_t L;
1012
1013 fneg_match(const LHS_t &LHS) : L(LHS) {}
1014
1015 template <typename OpTy> bool match(OpTy *V) {
1016 if (auto *O = dyn_cast<Operator>(V))
1017 if (O->getOpcode() == Instruction::FSub)
1018 return matchIfFNeg(O->getOperand(0), O->getOperand(1));
1019 return false;
1020 }
1021
1022private:
1023 bool matchIfFNeg(Value *LHS, Value *RHS) {
1024 if (const auto *C = dyn_cast<ConstantFP>(LHS))
1025 return C->isNegativeZeroValue() && L.match(RHS);
1026 return false;
1027 }
1028};
1029
1030/// \brief Match a floating point negate.
1031template <typename LHS> inline fneg_match<LHS> m_FNeg(const LHS &L) {
1032 return L;
1033}
1034
1035//===----------------------------------------------------------------------===//
1036// Matchers for control flow.
1037//
1038
1039struct br_match {
1040 BasicBlock *&Succ;
1041
1042 br_match(BasicBlock *&Succ) : Succ(Succ) {}
1043
1044 template <typename OpTy> bool match(OpTy *V) {
1045 if (auto *BI = dyn_cast<BranchInst>(V))
1046 if (BI->isUnconditional()) {
1047 Succ = BI->getSuccessor(0);
1048 return true;
1049 }
1050 return false;
1051 }
1052};
1053
1054inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
1055
1056template <typename Cond_t> struct brc_match {
1057 Cond_t Cond;
1058 BasicBlock *&T, *&F;
1059
1060 brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
1061 : Cond(C), T(t), F(f) {}
1062
1063 template <typename OpTy> bool match(OpTy *V) {
1064 if (auto *BI = dyn_cast<BranchInst>(V))
1065 if (BI->isConditional() && Cond.match(BI->getCondition())) {
1066 T = BI->getSuccessor(0);
1067 F = BI->getSuccessor(1);
1068 return true;
1069 }
1070 return false;
1071 }
1072};
1073
1074template <typename Cond_t>
1075inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
1076 return brc_match<Cond_t>(C, T, F);
1077}
1078
1079//===----------------------------------------------------------------------===//
1080// Matchers for max/min idioms, eg: "select (sgt x, y), x, y" -> smax(x,y).
1081//
1082
1083template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
1084 bool Commutable = false>
1085struct MaxMin_match {
1086 LHS_t L;
1087 RHS_t R;
1088
1089 MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1090
1091 template <typename OpTy> bool match(OpTy *V) {
1092 // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
1093 auto *SI = dyn_cast<SelectInst>(V);
1094 if (!SI)
1095 return false;
1096 auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
1097 if (!Cmp)
1098 return false;
1099 // At this point we have a select conditioned on a comparison. Check that
1100 // it is the values returned by the select that are being compared.
1101 Value *TrueVal = SI->getTrueValue();
1102 Value *FalseVal = SI->getFalseValue();
1103 Value *LHS = Cmp->getOperand(0);
1104 Value *RHS = Cmp->getOperand(1);
1105 if ((TrueVal != LHS || FalseVal != RHS) &&
1106 (TrueVal != RHS || FalseVal != LHS))
1107 return false;
1108 typename CmpInst_t::Predicate Pred =
1109 LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
1110 // Does "(x pred y) ? x : y" represent the desired max/min operation?
1111 if (!Pred_t::match(Pred))
1112 return false;
1113 // It does! Bind the operands.
1114 return (L.match(LHS) && R.match(RHS)) ||
1115 (Commutable && R.match(LHS) && L.match(RHS));
1116 }
1117};
1118
1119/// \brief Helper class for identifying signed max predicates.
1120struct smax_pred_ty {
1121 static bool match(ICmpInst::Predicate Pred) {
1122 return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
1123 }
1124};
1125
1126/// \brief Helper class for identifying signed min predicates.
1127struct smin_pred_ty {
1128 static bool match(ICmpInst::Predicate Pred) {
1129 return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
1130 }
1131};
1132
1133/// \brief Helper class for identifying unsigned max predicates.
1134struct umax_pred_ty {
1135 static bool match(ICmpInst::Predicate Pred) {
1136 return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
1137 }
1138};
1139
1140/// \brief Helper class for identifying unsigned min predicates.
1141struct umin_pred_ty {
1142 static bool match(ICmpInst::Predicate Pred) {
1143 return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
1144 }
1145};
1146
1147/// \brief Helper class for identifying ordered max predicates.
1148struct ofmax_pred_ty {
1149 static bool match(FCmpInst::Predicate Pred) {
1150 return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
1151 }
1152};
1153
1154/// \brief Helper class for identifying ordered min predicates.
1155struct ofmin_pred_ty {
1156 static bool match(FCmpInst::Predicate Pred) {
1157 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
1158 }
1159};
1160
1161/// \brief Helper class for identifying unordered max predicates.
1162struct ufmax_pred_ty {
1163 static bool match(FCmpInst::Predicate Pred) {
1164 return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
1165 }
1166};
1167
1168/// \brief Helper class for identifying unordered min predicates.
1169struct ufmin_pred_ty {
1170 static bool match(FCmpInst::Predicate Pred) {
1171 return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
1172 }
1173};
1174
1175template <typename LHS, typename RHS>
1176inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
1177 const RHS &R) {
1178 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
1179}
1180
1181template <typename LHS, typename RHS>
1182inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
1183 const RHS &R) {
1184 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
1185}
1186
1187template <typename LHS, typename RHS>
1188inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
1189 const RHS &R) {
1190 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
1191}
1192
1193template <typename LHS, typename RHS>
1194inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
1195 const RHS &R) {
1196 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
1197}
1198
1199/// \brief Match an 'ordered' floating point maximum function.
1200/// Floating point has one special value 'NaN'. Therefore, there is no total
1201/// order. However, if we can ignore the 'NaN' value (for example, because of a
1202/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
1203/// semantics. In the presence of 'NaN' we have to preserve the original
1204/// select(fcmp(ogt/ge, L, R), L, R) semantics matched by this predicate.
1205///
1206/// max(L, R) iff L and R are not NaN
1207/// m_OrdFMax(L, R) = R iff L or R are NaN
1208template <typename LHS, typename RHS>
1209inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
1210 const RHS &R) {
1211 return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
1212}
1213
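Connecting the comment above to IR, a hedged usage sketch (illustrative helper name; same setup as the earlier sketches):

// Recognizes the ordered-max idiom described above:
//   %cmp = fcmp ogt double %A, %B
//   %max = select i1 %cmp, double %A, double %B
// and binds A and B.
static bool isOrderedFMaxIdiom(Value *V, Value *&A, Value *&B) {
  return match(V, m_OrdFMax(m_Value(A), m_Value(B)));
}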
1214/// \brief Match an 'ordered' floating point minimum function.
1215/// Floating point has one special value 'NaN'. Therefore, there is no total
1216/// order. However, if we can ignore the 'NaN' value (for example, because of a
1217/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
1218/// semantics. In the presence of 'NaN' we have to preserve the original
1219/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
1220///
1221/// min(L, R) iff L and R are not NaN
1222/// m_OrdFMin(L, R) = R iff L or R are NaN
1223template <typename LHS, typename RHS>
1224inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
1225 const RHS &R) {
1226 return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
1227}
1228
1229/// \brief Match an 'unordered' floating point maximum function.
1230/// Floating point has one special value 'NaN'. Therefore, there is no total
1231/// order. However, if we can ignore the 'NaN' value (for example, because of a
1232/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
1233/// semantics. In the presence of 'NaN' we have to preserve the original
1234/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
1235///
1236/// max(L, R) iff L and R are not NaN
1237/// m_UnordFMax(L, R) = L iff L or R are NaN
1238template <typename LHS, typename RHS>
1239inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
1240m_UnordFMax(const LHS &L, const RHS &R) {
1241 return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
1242}
1243
1244/// \brief Match an 'unordered' floating point minimum function.
1245/// Floating point has one special value 'NaN'. Therefore, there is no total
1246/// order. However, if we can ignore the 'NaN' value (for example, because of a
1247/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
1248/// semantics. In the presence of 'NaN' we have to preserve the original
1249/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
1250///
1251/// min(L, R) iff L and R are not NaN
1252/// m_UnordFMin(L, R) = L iff L or R are NaN
1253template <typename LHS, typename RHS>
1254inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
1255m_UnordFMin(const LHS &L, const RHS &R) {
1256 return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
1257}
1258
1259//===----------------------------------------------------------------------===//
1260// Matchers for overflow check patterns: e.g. (a + b) u< a
1261//
1262
1263template <typename LHS_t, typename RHS_t, typename Sum_t>
1264struct UAddWithOverflow_match {
1265 LHS_t L;
1266 RHS_t R;
1267 Sum_t S;
1268
1269 UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
1270 : L(L), R(R), S(S) {}
1271
1272 template <typename OpTy> bool match(OpTy *V) {
1273 Value *ICmpLHS, *ICmpRHS;
1274 ICmpInst::Predicate Pred;
1275 if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
1276 return false;
1277
1278 Value *AddLHS, *AddRHS;
1279 auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));
1280
1281 // (a + b) u< a, (a + b) u< b
1282 if (Pred == ICmpInst::ICMP_ULT)
1283 if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
1284 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
1285
1286 // a >u (a + b), b >u (a + b)
1287 if (Pred == ICmpInst::ICMP_UGT)
1288 if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
1289 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
1290
1291 return false;
1292 }
1293};
1294
1295/// \brief Match an icmp instruction checking for unsigned overflow on addition.
1296///
1297/// S is matched to the addition whose result is being checked for overflow, and
1298/// L and R are matched to the LHS and RHS of S.
1299template <typename LHS_t, typename RHS_t, typename Sum_t>
1300UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
1301m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
1302 return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
1303}
1304
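A sketch of the overflow-check matcher in use (illustrative helper name; same setup as the earlier sketches):

// Matches '(A + B) u< A' or the mirrored forms handled above, binding the
// two addends and the add instruction whose overflow is being tested.
static bool isUAddOverflowCheck(Value *V, Value *&A, Value *&B, Value *&Sum) {
  return match(V, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)));
}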
1305template <typename Opnd_t> struct Argument_match {
1306 unsigned OpI;
1307 Opnd_t Val;
1308
1309 Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
1310
1311 template <typename OpTy> bool match(OpTy *V) {
1312 CallSite CS(V);
1313 return CS.isCall() && Val.match(CS.getArgument(OpI));
1314 }
1315};
1316
1317/// \brief Match an argument.
1318template <unsigned OpI, typename Opnd_t>
1319inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
1320 return Argument_match<Opnd_t>(OpI, Op);
1321}
1322
1323/// \brief Intrinsic matchers.
1324struct IntrinsicID_match {
1325 unsigned ID;
1326
1327 IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
1328
1329 template <typename OpTy> bool match(OpTy *V) {
1330 if (const auto *CI = dyn_cast<CallInst>(V))
1331 if (const auto *F = CI->getCalledFunction())
1332 return F->getIntrinsicID() == ID;
1333 return false;
1334 }
1335};
1336
1337/// Intrinsic matchers are combinations of ID matchers and argument
1338/// matchers. Higher arity matchers are defined recursively in terms of and-ing
1339/// them with lower arity matchers. Here are some convenient typedefs for up to
1340/// several arguments; more can be added as needed.
1341template <typename T0 = void, typename T1 = void, typename T2 = void,
1342 typename T3 = void, typename T4 = void, typename T5 = void,
1343 typename T6 = void, typename T7 = void, typename T8 = void,
1344 typename T9 = void, typename T10 = void>
1345struct m_Intrinsic_Ty;
1346template <typename T0> struct m_Intrinsic_Ty<T0> {
1347 using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
1348};
1349template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
1350 using Ty =
1351 match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
1352};
1353template <typename T0, typename T1, typename T2>
1354struct m_Intrinsic_Ty<T0, T1, T2> {
1355 using Ty =
1356 match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
1357 Argument_match<T2>>;
1358};
1359template <typename T0, typename T1, typename T2, typename T3>
1360struct m_Intrinsic_Ty<T0, T1, T2, T3> {
1361 using Ty =
1362 match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
1363 Argument_match<T3>>;
1364};
1365
1366/// \brief Match intrinsic calls like this:
1367/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
1368template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
1369 return IntrinsicID_match(IntrID);
1370}
1371
1372template <Intrinsic::ID IntrID, typename T0>
1373inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
1374 return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
1375}
1376
1377template <Intrinsic::ID IntrID, typename T0, typename T1>
1378inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
1379 const T1 &Op1) {
1380 return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
1381}
1382
1383template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
1384inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
1385m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
1386 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
1387}
1388
1389template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
1390 typename T3>
1391inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
1392m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
1393 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
1394}
1395
1396// Helper intrinsic matching specializations.
1397template <typename Opnd0>
1398inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
1399 return m_Intrinsic<Intrinsic::bitreverse>(Op0);
1400}
1401
1402template <typename Opnd0>
1403inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
1404 return m_Intrinsic<Intrinsic::bswap>(Op0);
1405}
1406
1407template <typename Opnd0, typename Opnd1>
1408inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
1409 const Opnd1 &Op1) {
1410 return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
1411}
1412
1413template <typename Opnd0, typename Opnd1>
1414inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
1415 const Opnd1 &Op1) {
1416 return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
1417}
1418
1419template <typename Opnd_t> struct Signum_match {
1420 Opnd_t Val;
1421 Signum_match(const Opnd_t &V) : Val(V) {}
1422
1423 template <typename OpTy> bool match(OpTy *V) {
1424 unsigned TypeSize = V->getType()->getScalarSizeInBits();
1425 if (TypeSize == 0)
1426 return false;
1427
1428 unsigned ShiftWidth = TypeSize - 1;
1429 Value *OpL = nullptr, *OpR = nullptr;
1430
1431 // This is the representation of signum we match:
1432 //
1433 // signum(x) == (x >> 63) | (-x >>u 63)
1434 //
1435 // An i1 value is its own signum, so it's correct to match
1436 //
1437 // signum(x) == (x >> 0) | (-x >>u 0)
1438 //
1439 // for i1 values.
1440
1441 auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
1442 auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
1443 auto Signum = m_Or(LHS, RHS);
1444
1445 return Signum.match(V) && OpL == OpR && Val.match(OpL);
1446 }
1447};
1448
1449/// \brief Matches a signum pattern.
1450///
1451/// signum(x) =
1452/// x > 0 -> 1
1453/// x == 0 -> 0
1454/// x < 0 -> -1
1455template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
1456 return Signum_match<Val_t>(V);
1457}
1458
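A short sketch of the signum matcher (illustrative helper name; same setup as the earlier sketches):

// For an i32 input this fires on '(X >> 31) | (-X >>u 31)', the expansion
// described in Signum_match::match above, and binds X.
static bool isSignumExpansion(Value *V, Value *&X) {
  return match(V, m_Signum(m_Value(X)));
}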
1459//===----------------------------------------------------------------------===//
1460// Matchers for two-operands operators with the operators in either order
1461//
1462
1463/// \brief Matches a BinaryOperator with LHS and RHS in either order.
1464template <typename LHS, typename RHS>
1465inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
1466 return AnyBinaryOp_match<LHS, RHS, true>(L, R);
1467}
1468
1469/// \brief Matches an ICmp with a predicate over LHS and RHS in either order.
1470/// Does not swap the predicate.
1471template <typename LHS, typename RHS>
1472inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
1473m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1474 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
1475 R);
1476}
1477
1478/// \brief Matches an Add with LHS and RHS in either order.
1479template <typename LHS, typename RHS>
1480inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
1481 const RHS &R) {
1482 return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
1483}
1484
1485/// \brief Matches a Mul with LHS and RHS in either order.
1486template <typename LHS, typename RHS>
1487inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
1488 const RHS &R) {
1489 return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
1490}
1491
1492/// \brief Matches an And with LHS and RHS in either order.
1493template <typename LHS, typename RHS>
1494inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
1495 const RHS &R) {
1496 return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
1497}
1498
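A sketch of what the commutative variants buy; 0xFF and the helper name are arbitrary, illustrative choices:

// Matches both 'and X, 255' and 'and 255, X'; the non-commutative m_And
// earlier in this file would only accept the constant as the second operand
// of this particular pattern.
static bool isMaskedWith255(Value *V, Value *&X) {
  return match(V, m_c_And(m_Value(X), m_SpecificInt(0xFF)));
}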
1499/// \brief Matches an Or with LHS and RHS in either order.
1500template <typename LHS, typename RHS>
1501inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
1502 const RHS &R) {
1503 return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
1504}
1505
1506/// \brief Matches an Xor with LHS and RHS in either order.
1507template <typename LHS, typename RHS>
1508inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
1509 const RHS &R) {
1510 return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
1511}
1512
1513/// Matches an SMin with LHS and RHS in either order.
1514template <typename LHS, typename RHS>
1515inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
1516m_c_SMin(const LHS &L, const RHS &R) {
1517 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
1518}
1519/// Matches an SMax with LHS and RHS in either order.
1520template <typename LHS, typename RHS>
1521inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
1522m_c_SMax(const LHS &L, const RHS &R) {
1523 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
1524}
1525/// Matches a UMin with LHS and RHS in either order.
1526template <typename LHS, typename RHS>
1527inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
1528m_c_UMin(const LHS &L, const RHS &R) {
1529 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
1530}
1531/// Matches a UMax with LHS and RHS in either order.
1532template <typename LHS, typename RHS>
1533inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
1534m_c_UMax(const LHS &L, const RHS &R) {
1535 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
1536}
1537
1538} // end namespace PatternMatch
1539} // end namespace llvm
1540
1541#endif // LLVM_IR_PATTERNMATCH_H

/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h

1//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
11// and dyn_cast_or_null<X>() templates.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_SUPPORT_CASTING_H
16#define LLVM_SUPPORT_CASTING_H
17
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/type_traits.h"
20#include <cassert>
21#include <memory>
22#include <type_traits>
23
24namespace llvm {
25
26//===----------------------------------------------------------------------===//
27// isa<x> Support Templates
28//===----------------------------------------------------------------------===//
29
30// Define a template that can be specialized by smart pointers to reflect the
31// fact that they are automatically dereferenced, and are not involved with the
32// template selection process... the default implementation is a noop.
33//
34template<typename From> struct simplify_type {
35 using SimpleType = From; // The real type this represents...
36
37 // An accessor to get the real value...
38 static SimpleType &getSimplifiedValue(From &Val) { return Val; }
39};
40
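A hedged sketch of the kind of specialization this comment is describing; MyHandle is a hypothetical client-side wrapper, not an LLVM type, and this mirrors (but does not reproduce) what LLVM does for its own handle types:

#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

struct MyHandle {        // hypothetical smart-pointer-like wrapper
  llvm::Value *Ptr;
};

namespace llvm {
// Let isa<>/cast<>/dyn_cast<> look through MyHandle automatically.
template <> struct simplify_type<MyHandle> {
  using SimpleType = Value *;
  static SimpleType getSimplifiedValue(MyHandle &H) { return H.Ptr; }
};
} // end namespace llvm

With this in place, isa<Instruction>(H) on a MyHandle H is resolved through Ptr rather than on the wrapper itself.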
41template<typename From> struct simplify_type<const From> {
42 using NonConstSimpleType = typename simplify_type<From>::SimpleType;
43 using SimpleType =
44 typename add_const_past_pointer<NonConstSimpleType>::type;
45 using RetType =
46 typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
47
48 static RetType getSimplifiedValue(const From& Val) {
49 return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
Analyzer steps 21-782 (30 call/return pairs): Calling/Returning from 'simplify_type::getSimplifiedValue'
50 }
51};
52
53// The core of the implementation of isa<X> is here; To and From should be
54// the names of classes. This template can be specialized to customize the
55// implementation of isa<> without rewriting it from scratch.
56template <typename To, typename From, typename Enabler = void>
57struct isa_impl {
58 static inline bool doit(const From &Val) {
59 return To::classof(&Val);
Analyzer steps 28-791: Calling/Returning from 'ICmpInst::classof' (steps 28, 112, 489), 'ConstantExpr::classof' (steps 85, 365, 411, 462, 742, 788), and 'ConstantInt::classof' (steps 293, 670)
60 }
61};
62
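A hedged sketch of the customization point mentioned in the comment above; Shape and Circle are hypothetical types that do not provide a classof():

#include "llvm/Support/Casting.h"

struct Shape {           // hypothetical, not an LLVM class
  enum Kind { SK_Circle, SK_Square } TheKind;
};
struct Circle : Shape {};

namespace llvm {
// Without this specialization, isa<Circle>(S) would call Circle::classof(),
// which these types do not define; with it, the kind field is consulted.
template <> struct isa_impl<Circle, Shape> {
  static inline bool doit(const Shape &S) {
    return S.TheKind == Shape::SK_Circle;
  }
};
} // end namespace llvm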
63/// \brief Always allow upcasts, and perform no dynamic check for them.
64template <typename To, typename From>
65struct isa_impl<
66 To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
67 static inline bool doit(const From &) { return true; }
68};
69
70template <typename To, typename From> struct isa_impl_cl {
71 static inline bool doit(const From &Val) {
72 return isa_impl<To, From>::doit(Val);
73 }
74};
75
76template <typename To, typename From> struct isa_impl_cl<To, const From> {
77 static inline bool doit(const From &Val) {
78 return isa_impl<To, From>::doit(Val);
79 }
80};
81
82template <typename To, typename From>
83struct isa_impl_cl<To, const std::unique_ptr<From>> {
84 static inline bool doit(const std::unique_ptr<From> &Val) {
85 assert(Val && "isa<> used on a null pointer");
86 return isa_impl_cl<To, From>::doit(*Val);
87 }
88};
89
90template <typename To, typename From> struct isa_impl_cl<To, From*> {
91 static inline bool doit(const From *Val) {
92 assert(Val && "isa<> used on a null pointer");
93 return isa_impl<To, From>::doit(*Val);
94 }
95};
96
97template <typename To, typename From> struct isa_impl_cl<To, From*const> {
98 static inline bool doit(const From *Val) {
99 assert(Val && "isa<> used on a null pointer");
100 return isa_impl<To, From>::doit(*Val);
101 }
102};
103
104template <typename To, typename From> struct isa_impl_cl<To, const From*> {
105 static inline bool doit(const From *Val) {
106 assert(Val && "isa<> used on a null pointer");
Analyzer steps 26-786 (21 visits): Within the expansion of the macro 'assert'; assuming 'Val' is non-null (steps 291, 668)
107 return isa_impl<To, From>::doit(*Val);
Analyzer steps 27-792 (21 call/return pairs): Calling/Returning from 'isa_impl::doit'
108 }
109};
110
111template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
112 static inline bool doit(const From *Val) {
113 assert(Val && "isa<> used on a null pointer");
114 return isa_impl<To, From>::doit(*Val);
115 }
116};
117
118template<typename To, typename From, typename SimpleFrom>
119struct isa_impl_wrap {
120 // When From != SimplifiedType, we can simplify the type some more by using
121 // the simplify_type template.
122 static bool doit(const From &Val) {
123 return isa_impl_wrap<To, SimpleFrom,
Analyzer steps 24-794: repeated Calling/Returning from 'isa_impl_wrap::doit'
124 typename simplify_type<SimpleFrom>::SimpleType>::doit(
125 simplify_type<const From>::getSimplifiedValue(Val));
Analyzer steps 20-783 (30 call/return pairs): Calling/Returning from 'simplify_type::getSimplifiedValue'
126 }
127};
128
129template<typename To, typename FromTy>
130struct isa_impl_wrap<To, FromTy, FromTy> {
131 // When From == SimpleType, we are as simple as we are going to get.
132 static bool doit(const FromTy &Val) {
133 return isa_impl_cl<To,FromTy>::doit(Val);
Analyzer steps 25-793 (21 call/return pairs): Calling/Returning from 'isa_impl_cl::doit'
134 }
135};
136
137// isa<X> - Return true if the parameter to the template is an instance of the
138// template type argument. Used like this:
139//
140// if (isa<Type>(myVal)) { ... }
141//
142template <class X, class Y> LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isa(const Y &Val) {
143 return isa_impl_wrap<X, const Y,
[analyzer path: repeated Calling/Returning events for 'isa_impl_wrap::doit' (steps 19-795) collapsed]
144 typename simplify_type<const Y>::SimpleType>::doit(Val);
145}
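
A minimal sketch of the isa<> idiom described in the comment above, assuming the IR instruction classes listed later in this report; the helper name is hypothetical.

// Hedged example: query the dynamic type of a Value without changing its type.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool isVolatileMemOp(const Value *V) {   // hypothetical helper
  if (isa<LoadInst>(V))
    return cast<LoadInst>(V)->isVolatile();
  if (isa<StoreInst>(V))
    return cast<StoreInst>(V)->isVolatile();
  return false;                                 // neither a load nor a store
}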
146
147//===----------------------------------------------------------------------===//
148// cast<x> Support Templates
149//===----------------------------------------------------------------------===//
150
151template<class To, class From> struct cast_retty;
152
153// Calculate what type the 'cast' function should return, based on a requested
154// type of To and a source type of From.
155template<class To, class From> struct cast_retty_impl {
156 using ret_type = To &; // Normal case, return Ty&
157};
158template<class To, class From> struct cast_retty_impl<To, const From> {
159 using ret_type = const To &; // Const case, return const Ty&
160};
161
162template<class To, class From> struct cast_retty_impl<To, From*> {
163 using ret_type = To *; // Pointer arg case, return Ty*
164};
165
166template<class To, class From> struct cast_retty_impl<To, const From*> {
167 using ret_type = const To *; // Constant pointer arg case, return const Ty*
168};
169
170template<class To, class From> struct cast_retty_impl<To, const From*const> {
171 using ret_type = const To *; // Constant pointer arg case, return const Ty*
172};
173
174template <class To, class From>
175struct cast_retty_impl<To, std::unique_ptr<From>> {
176private:
177 using PointerType = typename cast_retty_impl<To, From *>::ret_type;
178 using ResultType = typename std::remove_pointer<PointerType>::type;
179
180public:
181 using ret_type = std::unique_ptr<ResultType>;
182};
183
184template<class To, class From, class SimpleFrom>
185struct cast_retty_wrap {
186 // When the simplified type and the from type are not the same, use the type
187 // simplifier to reduce the type, then reuse cast_retty_impl to get the
188 // resultant type.
189 using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
190};
191
192template<class To, class FromTy>
193struct cast_retty_wrap<To, FromTy, FromTy> {
194 // When the simplified type is equal to the from type, use it directly.
195 using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
196};
197
198template<class To, class From>
199struct cast_retty {
200 using ret_type = typename cast_retty_wrap<
201 To, From, typename simplify_type<From>::SimpleType>::ret_type;
202};
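
The trait machinery above can be checked directly. A sketch of what cast_retty computes for the common pointer shapes, assuming the usual LLVM Value/Instruction hierarchy:

// Hedged example: cast_retty maps the source argument shape to the result type.
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Casting.h"
#include <type_traits>
using namespace llvm;

static_assert(std::is_same<cast_retty<Instruction, Value *>::ret_type,
                           Instruction *>::value,
              "pointer argument yields a pointer result");
static_assert(std::is_same<cast_retty<Instruction, const Value *>::ret_type,
                           const Instruction *>::value,
              "const pointer argument yields a const pointer result");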
203
204// Ensure the non-simple values are converted using the simplify_type template
205// that may be specialized by smart pointers...
206//
207template<class To, class From, class SimpleFrom> struct cast_convert_val {
208 // This is not a simple type, use the template to simplify it...
209 static typename cast_retty<To, From>::ret_type doit(From &Val) {
210 return cast_convert_val<To, SimpleFrom,
211 typename simplify_type<SimpleFrom>::SimpleType>::doit(
212 simplify_type<From>::getSimplifiedValue(Val));
213 }
214};
215
216template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
217 // This _is_ a simple type, just cast it.
218 static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
219 typename cast_retty<To, FromTy>::ret_type Res2
220 = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
221 return Res2;
222 }
223};
224
225template <class X> struct is_simple_type {
226 static const bool value =
227 std::is_same<X, typename simplify_type<X>::SimpleType>::value;
228};
229
230// cast<X> - Return the argument parameter cast to the specified type. This
231// casting operator asserts that the type is correct, so it does not return null
232// on failure. It does not allow a null argument (use cast_or_null for that).
233// It is typically used like this:
234//
235// cast<Instruction>(myVal)->getParent()
236//
237template <class X, class Y>
238inline typename std::enable_if<!is_simple_type<Y>::value,
239 typename cast_retty<X, const Y>::ret_type>::type
240cast(const Y &Val) {
241 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 241, __PRETTY_FUNCTION__))
;
242 return cast_convert_val<
243 X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
244}
245
246template <class X, class Y>
247inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
248 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 248, __PRETTY_FUNCTION__))
;
249 return cast_convert_val<X, Y,
250 typename simplify_type<Y>::SimpleType>::doit(Val);
251}
252
253template <class X, class Y>
254inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
255 assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 255, __PRETTY_FUNCTION__))
;
[analyzer path: repeated assert-expansion events ('Calling isa', 'Returning from isa', 'Assuming the condition is true') at steps 139-660 collapsed]
256 return cast_convert_val<X, Y*,
[analyzer path: repeated Calling/Returning events for 'cast_convert_val::doit' (steps 153-680) collapsed]
257 typename simplify_type<Y*>::SimpleType>::doit(Val);
258}
259
260template <class X, class Y>
261inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
262cast(std::unique_ptr<Y> &&Val) {
263 assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!")((isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val.get()) && \"cast<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 263, __PRETTY_FUNCTION__))
;
264 using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
265 return ret_type(
266 cast_convert_val<X, Y *, typename simplify_type<Y *>::SimpleType>::doit(
267 Val.release()));
268}
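
A short sketch of the cast<> contract described above: the caller has already established the type (for example with isa<>), so no null check is performed; the helper name is hypothetical.

// Hedged example: cast<> asserts on a type mismatch instead of returning null.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static Value *storedValue(Instruction *I) {   // hypothetical helper
  // Precondition: the caller has checked isa<StoreInst>(I).
  StoreInst *SI = cast<StoreInst>(I);
  return SI->getValueOperand();               // operand 0 of the store
}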
269
270// cast_or_null<X> - Functionally identical to cast, except that a null value is
271// accepted.
272//
273template <class X, class Y>
274LLVM_NODISCARD[[clang::warn_unused_result]] inline
275 typename std::enable_if<!is_simple_type<Y>::value,
276 typename cast_retty<X, const Y>::ret_type>::type
277 cast_or_null(const Y &Val) {
278 if (!Val)
279 return nullptr;
280 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 280, __PRETTY_FUNCTION__))
;
281 return cast<X>(Val);
282}
283
284template <class X, class Y>
285LLVM_NODISCARD[[clang::warn_unused_result]] inline
286 typename std::enable_if<!is_simple_type<Y>::value,
287 typename cast_retty<X, Y>::ret_type>::type
288 cast_or_null(Y &Val) {
289 if (!Val)
290 return nullptr;
291 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 291, __PRETTY_FUNCTION__))
;
292 return cast<X>(Val);
293}
294
295template <class X, class Y>
296LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type
297cast_or_null(Y *Val) {
298 if (!Val) return nullptr;
[analyzer path: repeated "Assuming 'Val' is non-null" / "Taking false branch" events (steps 160-617) collapsed]
299 assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!")((isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!"
) ? static_cast<void> (0) : __assert_fail ("isa<X>(Val) && \"cast_or_null<Ty>() argument of incompatible type!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/Support/Casting.h"
, 299, __PRETTY_FUNCTION__))
;
[analyzer path: repeated assert-expansion events ('Calling isa' / 'Returning from isa') at steps 162-618 collapsed]
300 return cast<X>(Val);
[analyzer path: repeated Calling/Returning events for 'cast' (steps 176-644) collapsed]
301}
302
303template <class X, class Y>
304inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type
305cast_or_null(std::unique_ptr<Y> &&Val) {
306 if (!Val)
307 return nullptr;
308 return cast<X>(std::move(Val));
309}
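
A sketch of the cast_or_null<> variant above, which differs from cast<> only in accepting a null argument; the helper name is hypothetical and the caller is assumed to pass either null or an alloca.

// Hedged example: cast_or_null<> simply propagates a null input.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static PointerType *allocaType(Instruction *I) {  // hypothetical helper
  // Precondition: I is either null or an AllocaInst.
  AllocaInst *AI = cast_or_null<AllocaInst>(I);
  return AI ? AI->getType() : nullptr;            // AllocaInst overloads getType()
}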
310
311// dyn_cast<X> - Return the argument parameter cast to the specified type. This
312// casting operator returns null if the argument is of the wrong type, so it can
313// be used to test for a type as well as cast if successful. This should be
314// used in the context of an if statement like this:
315//
316// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
317//
318
319template <class X, class Y>
320LLVM_NODISCARD[[clang::warn_unused_result]] inline
321 typename std::enable_if<!is_simple_type<Y>::value,
322 typename cast_retty<X, const Y>::ret_type>::type
323 dyn_cast(const Y &Val) {
324 return isa<X>(Val) ? cast<X>(Val) : nullptr;
325}
326
327template <class X, class Y>
328LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
329 return isa<X>(Val) ? cast<X>(Val) : nullptr;
330}
331
332template <class X, class Y>
333LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
334 return isa<X>(Val) ? cast<X>(Val) : nullptr;
[analyzer path: repeated isa/cast call events and "'?' condition" branch notes (steps 18-797) collapsed]
335}
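
A sketch of the test-and-cast idiom shown in the dyn_cast<> comment above; the helper name is hypothetical.

// Hedged example: dyn_cast<> folds the isa<> test and the cast<> into one step.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static unsigned loadAlignmentOrZero(Value *V) {   // hypothetical helper
  if (auto *LI = dyn_cast<LoadInst>(V))           // null when V is not a load
    return LI->getAlignment();
  return 0;
}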
336
337// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
338// value is accepted.
339//
340template <class X, class Y>
341LLVM_NODISCARD[[clang::warn_unused_result]] inline
342 typename std::enable_if<!is_simple_type<Y>::value,
343 typename cast_retty<X, const Y>::ret_type>::type
344 dyn_cast_or_null(const Y &Val) {
345 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
346}
347
348template <class X, class Y>
349LLVM_NODISCARD[[clang::warn_unused_result]] inline
350 typename std::enable_if<!is_simple_type<Y>::value,
351 typename cast_retty<X, Y>::ret_type>::type
352 dyn_cast_or_null(Y &Val) {
353 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
354}
355
356template <class X, class Y>
357LLVM_NODISCARD[[clang::warn_unused_result]] inline typename cast_retty<X, Y *>::ret_type
358dyn_cast_or_null(Y *Val) {
359 return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
360}
361
362// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
363// taking ownership of the input pointer iff isa<X>(Val) is true. If the
364// cast is successful, Val refers to nullptr on exit and the casted value
365// is returned. If the cast is unsuccessful, the function returns nullptr
366// and Val is unchanged.
367template <class X, class Y>
368LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast(std::unique_ptr<Y> &Val)
369 -> decltype(cast<X>(Val)) {
370 if (!isa<X>(Val))
371 return nullptr;
372 return cast<X>(std::move(Val));
373}
374
375template <class X, class Y>
376LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val)
377 -> decltype(cast<X>(Val)) {
378 return unique_dyn_cast<X, Y>(Val);
379}
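
A sketch of the ownership contract documented above for unique_dyn_cast<>, assuming Base and Derived use LLVM-style classof() and that the caller legitimately owns the object through a std::unique_ptr; the wrapper below is hypothetical.

// Hedged example: ownership moves only when the cast succeeds.
#include "llvm/Support/Casting.h"
#include <memory>
using namespace llvm;

template <class Derived, class Base>
std::unique_ptr<Derived> takeIfDerived(std::unique_ptr<Base> &Owner) {
  // Assumes Owner is non-null. On success Owner is left null and the result
  // owns the object; on failure the result is null and Owner is unchanged.
  return unique_dyn_cast<Derived>(Owner);
}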
380
381// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast, except
382// a null value is accepted.
383template <class X, class Y>
384LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &Val)
385 -> decltype(cast<X>(Val)) {
386 if (!Val)
387 return nullptr;
388 return unique_dyn_cast<X, Y>(Val);
389}
390
391template <class X, class Y>
392LLVM_NODISCARD[[clang::warn_unused_result]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val)
393 -> decltype(cast<X>(Val)) {
394 return unique_dyn_cast_or_null<X, Y>(Val);
395}
396
397} // end namespace llvm
398
399#endif // LLVM_SUPPORT_CASTING_H

/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file exposes the class definitions of all of the subclasses of the
11// Instruction class. This is meant to be an easy way to get access to all
12// instruction subclasses.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_IR_INSTRUCTIONS_H
17#define LLVM_IR_INSTRUCTIONS_H
18
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/Constant.h"
31#include "llvm/IR/DerivedTypes.h"
32#include "llvm/IR/Function.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/OperandTraits.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Use.h"
38#include "llvm/IR/User.h"
39#include "llvm/IR/Value.h"
40#include "llvm/Support/AtomicOrdering.h"
41#include "llvm/Support/Casting.h"
42#include "llvm/Support/ErrorHandling.h"
43#include <cassert>
44#include <cstddef>
45#include <cstdint>
46#include <iterator>
47
48namespace llvm {
49
50class APInt;
51class ConstantInt;
52class DataLayout;
53class LLVMContext;
54
55//===----------------------------------------------------------------------===//
56// AllocaInst Class
57//===----------------------------------------------------------------------===//
58
59/// an instruction to allocate memory on the stack
60class AllocaInst : public UnaryInstruction {
61 Type *AllocatedType;
62
63protected:
64 // Note: Instruction needs to be a friend here to call cloneImpl.
65 friend class Instruction;
66
67 AllocaInst *cloneImpl() const;
68
69public:
70 explicit AllocaInst(Type *Ty, unsigned AddrSpace,
71 Value *ArraySize = nullptr,
72 const Twine &Name = "",
73 Instruction *InsertBefore = nullptr);
74 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
75 const Twine &Name, BasicBlock *InsertAtEnd);
76
77 AllocaInst(Type *Ty, unsigned AddrSpace,
78 const Twine &Name, Instruction *InsertBefore = nullptr);
79 AllocaInst(Type *Ty, unsigned AddrSpace,
80 const Twine &Name, BasicBlock *InsertAtEnd);
81
82 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
83 const Twine &Name = "", Instruction *InsertBefore = nullptr);
84 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
85 const Twine &Name, BasicBlock *InsertAtEnd);
86
87 /// Return true if there is an allocation size parameter to the allocation
88 /// instruction that is not 1.
89 bool isArrayAllocation() const;
90
91 /// Get the number of elements allocated. For a simple allocation of a single
92 /// element, this will return a constant 1 value.
93 const Value *getArraySize() const { return getOperand(0); }
94 Value *getArraySize() { return getOperand(0); }
95
96 /// Overload to return most specific pointer type.
97 PointerType *getType() const {
98 return cast<PointerType>(Instruction::getType());
99 }
100
101 /// Return the type that is being allocated by the instruction.
102 Type *getAllocatedType() const { return AllocatedType; }
103 /// for use only in special circumstances that need to generically
104 /// transform a whole instruction (eg: IR linking and vectorization).
105 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
106
107 /// Return the alignment of the memory that is being allocated by the
108 /// instruction.
109 unsigned getAlignment() const {
110 return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
111 }
112 void setAlignment(unsigned Align);
113
114 /// Return true if this alloca is in the entry block of the function and is a
115 /// constant size. If so, the code generator will fold it into the
116 /// prolog/epilog code, so it is basically free.
117 bool isStaticAlloca() const;
118
119 /// Return true if this alloca is used as an inalloca argument to a call. Such
120 /// allocas are never considered static even if they are in the entry block.
121 bool isUsedWithInAlloca() const {
122 return getSubclassDataFromInstruction() & 32;
123 }
124
125 /// Specify whether this alloca is used to represent the arguments to a call.
126 void setUsedWithInAlloca(bool V) {
127 setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
128 (V ? 32 : 0));
129 }
130
131 /// Return true if this alloca is used as a swifterror argument to a call.
132 bool isSwiftError() const {
133 return getSubclassDataFromInstruction() & 64;
134 }
135
136 /// Specify whether this alloca is used to represent a swifterror.
137 void setSwiftError(bool V) {
138 setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) |
139 (V ? 64 : 0));
140 }
141
142 // Methods for support type inquiry through isa, cast, and dyn_cast:
143 static bool classof(const Instruction *I) {
144 return (I->getOpcode() == Instruction::Alloca);
145 }
146 static bool classof(const Value *V) {
147 return isa<Instruction>(V) && classof(cast<Instruction>(V));
148 }
149
150private:
151 // Shadow Instruction::setInstructionSubclassData with a private forwarding
152 // method so that subclasses cannot accidentally use it.
153 void setInstructionSubclassData(unsigned short D) {
154 Instruction::setInstructionSubclassData(D);
155 }
156};
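
The getAlignment() getter above recovers the alignment from the low five bits of the instruction's subclass data; the decode expression implies a log2-plus-one encoding, with zero meaning no recorded alignment. A standalone sketch of that arithmetic (the function below only mirrors the expression shown above):

// Hedged example: the decode used by AllocaInst::getAlignment() above.
#include <cassert>

static unsigned decodeAlign(unsigned SubclassData) {
  return (1u << (SubclassData & 31)) >> 1;   // same expression as the getter
}

int main() {
  assert(decodeAlign(0) == 0);   // no alignment recorded
  assert(decodeAlign(1) == 1);   // 2^(1-1) = 1-byte alignment
  assert(decodeAlign(4) == 8);   // 2^(4-1) = 8-byte alignment
  return 0;
}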
157
158//===----------------------------------------------------------------------===//
159// LoadInst Class
160//===----------------------------------------------------------------------===//
161
162/// An instruction for reading from memory. This uses the SubclassData field in
163/// Value to store whether or not the load is volatile.
164class LoadInst : public UnaryInstruction {
165 void AssertOK();
166
167protected:
168 // Note: Instruction needs to be a friend here to call cloneImpl.
169 friend class Instruction;
170
171 LoadInst *cloneImpl() const;
172
173public:
174 LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
175 LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
176 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
177 Instruction *InsertBefore = nullptr);
178 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
179 Instruction *InsertBefore = nullptr)
180 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
181 NameStr, isVolatile, InsertBefore) {}
182 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
183 BasicBlock *InsertAtEnd);
184 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
185 Instruction *InsertBefore = nullptr)
186 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
187 NameStr, isVolatile, Align, InsertBefore) {}
188 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
189 unsigned Align, Instruction *InsertBefore = nullptr);
190 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
191 unsigned Align, BasicBlock *InsertAtEnd);
192 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
193 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
194 Instruction *InsertBefore = nullptr)
195 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
196 NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 unsigned Align, AtomicOrdering Order,
199 SyncScope::ID SSID = SyncScope::System,
200 Instruction *InsertBefore = nullptr);
201 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
202 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
203 BasicBlock *InsertAtEnd);
204 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
205 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
206 LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
207 bool isVolatile = false, Instruction *InsertBefore = nullptr);
208 explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
209 bool isVolatile = false,
210 Instruction *InsertBefore = nullptr)
211 : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
212 NameStr, isVolatile, InsertBefore) {}
213 LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
214 BasicBlock *InsertAtEnd);
215
216 /// Return true if this is a load from a volatile memory location.
217 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
218
219 /// Specify whether this is a volatile load or not.
220 void setVolatile(bool V) {
221 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
222 (V ? 1 : 0));
223 }
224
225 /// Return the alignment of the access that is being performed.
226 unsigned getAlignment() const {
227 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
228 }
229
230 void setAlignment(unsigned Align);
231
232 /// Returns the ordering constraint of this load instruction.
233 AtomicOrdering getOrdering() const {
234 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
235 }
236
237 /// Sets the ordering constraint of this load instruction. May not be Release
238 /// or AcquireRelease.
239 void setOrdering(AtomicOrdering Ordering) {
240 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
241 ((unsigned)Ordering << 7));
242 }
243
244 /// Returns the synchronization scope ID of this load instruction.
245 SyncScope::ID getSyncScopeID() const {
246 return SSID;
247 }
248
249 /// Sets the synchronization scope ID of this load instruction.
250 void setSyncScopeID(SyncScope::ID SSID) {
251 this->SSID = SSID;
252 }
253
254 /// Sets the ordering constraint and the synchronization scope ID of this load
255 /// instruction.
256 void setAtomic(AtomicOrdering Ordering,
257 SyncScope::ID SSID = SyncScope::System) {
258 setOrdering(Ordering);
259 setSyncScopeID(SSID);
260 }
261
262 bool isSimple() const { return !isAtomic() && !isVolatile(); }
263
264 bool isUnordered() const {
265 return (getOrdering() == AtomicOrdering::NotAtomic ||
266 getOrdering() == AtomicOrdering::Unordered) &&
267 !isVolatile();
268 }
269
270 Value *getPointerOperand() { return getOperand(0); }
271 const Value *getPointerOperand() const { return getOperand(0); }
272 static unsigned getPointerOperandIndex() { return 0U; }
273 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
274
275 /// Returns the address space of the pointer operand.
276 unsigned getPointerAddressSpace() const {
277 return getPointerOperandType()->getPointerAddressSpace();
278 }
279
280 // Methods for support type inquiry through isa, cast, and dyn_cast:
281 static bool classof(const Instruction *I) {
282 return I->getOpcode() == Instruction::Load;
283 }
284 static bool classof(const Value *V) {
285 return isa<Instruction>(V) && classof(cast<Instruction>(V));
286 }
287
288private:
289 // Shadow Instruction::setInstructionSubclassData with a private forwarding
290 // method so that subclasses cannot accidentally use it.
291 void setInstructionSubclassData(unsigned short D) {
292 Instruction::setInstructionSubclassData(D);
293 }
294
295 /// The synchronization scope ID of this load instruction. Not quite enough
296 /// room in SubClassData for everything, so synchronization scope ID gets its
297 /// own field.
298 SyncScope::ID SSID;
299};
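
Taken together, the LoadInst getters above imply a packed layout for the subclass data: bit 0 holds the volatile flag, bits 1-5 the log2-encoded alignment, and bits 7-9 the raw AtomicOrdering value. A standalone sketch of that layout, using a hypothetical mirror struct rather than a real instruction:

// Hedged example: the bit layout implied by LoadInst's accessors above.
#include <cassert>

struct LoadBits {                  // hypothetical stand-in for the subclass data
  unsigned short Data = 0;
  bool     isVolatile() const { return Data & 1; }
  unsigned alignment()  const { return (1u << ((Data >> 1) & 31)) >> 1; }
  unsigned ordering()   const { return (Data >> 7) & 7; }  // raw AtomicOrdering
};

int main() {
  LoadBits B;
  B.Data = static_cast<unsigned short>(1 | (4 << 1) | (4 << 7));
  assert(B.isVolatile());          // bit 0 set
  assert(B.alignment() == 8);      // encoded 4 -> 8-byte alignment
  assert(B.ordering() == 4);       // raw ordering value 4
  return 0;
}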
300
301//===----------------------------------------------------------------------===//
302// StoreInst Class
303//===----------------------------------------------------------------------===//
304
305/// An instruction for storing to memory.
306class StoreInst : public Instruction {
307 void AssertOK();
308
309protected:
310 // Note: Instruction needs to be a friend here to call cloneImpl.
311 friend class Instruction;
312
313 StoreInst *cloneImpl() const;
314
315public:
316 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
317 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
319 Instruction *InsertBefore = nullptr);
320 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
322 unsigned Align, Instruction *InsertBefore = nullptr);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
324 unsigned Align, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
326 unsigned Align, AtomicOrdering Order,
327 SyncScope::ID SSID = SyncScope::System,
328 Instruction *InsertBefore = nullptr);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
330 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
331 BasicBlock *InsertAtEnd);
332
333 // allocate space for exactly two operands
334 void *operator new(size_t s) {
335 return User::operator new(s, 2);
336 }
337
338 /// Return true if this is a store to a volatile memory location.
339 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
340
341 /// Specify whether this is a volatile store or not.
342 void setVolatile(bool V) {
343 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
344 (V ? 1 : 0));
345 }
346
347 /// Transparently provide more efficient getOperand methods.
348 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
349
350 /// Return the alignment of the access that is being performed
351 unsigned getAlignment() const {
352 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
353 }
354
355 void setAlignment(unsigned Align);
356
357 /// Returns the ordering constraint of this store instruction.
358 AtomicOrdering getOrdering() const {
359 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
360 }
361
362 /// Sets the ordering constraint of this store instruction. May not be
363 /// Acquire or AcquireRelease.
364 void setOrdering(AtomicOrdering Ordering) {
365 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
366 ((unsigned)Ordering << 7));
367 }
368
369 /// Returns the synchronization scope ID of this store instruction.
370 SyncScope::ID getSyncScopeID() const {
371 return SSID;
372 }
373
374 /// Sets the synchronization scope ID of this store instruction.
375 void setSyncScopeID(SyncScope::ID SSID) {
376 this->SSID = SSID;
377 }
378
379 /// Sets the ordering constraint and the synchronization scope ID of this
380 /// store instruction.
381 void setAtomic(AtomicOrdering Ordering,
382 SyncScope::ID SSID = SyncScope::System) {
383 setOrdering(Ordering);
384 setSyncScopeID(SSID);
385 }
386
387 bool isSimple() const { return !isAtomic() && !isVolatile(); }
388
389 bool isUnordered() const {
390 return (getOrdering() == AtomicOrdering::NotAtomic ||
391 getOrdering() == AtomicOrdering::Unordered) &&
392 !isVolatile();
393 }
394
395 Value *getValueOperand() { return getOperand(0); }
396 const Value *getValueOperand() const { return getOperand(0); }
397
398 Value *getPointerOperand() { return getOperand(1); }
399 const Value *getPointerOperand() const { return getOperand(1); }
400 static unsigned getPointerOperandIndex() { return 1U; }
401 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
402
403 /// Returns the address space of the pointer operand.
404 unsigned getPointerAddressSpace() const {
405 return getPointerOperandType()->getPointerAddressSpace();
406 }
407
408 // Methods for support type inquiry through isa, cast, and dyn_cast:
409 static bool classof(const Instruction *I) {
410 return I->getOpcode() == Instruction::Store;
411 }
412 static bool classof(const Value *V) {
413 return isa<Instruction>(V) && classof(cast<Instruction>(V));
414 }
415
416private:
417 // Shadow Instruction::setInstructionSubclassData with a private forwarding
418 // method so that subclasses cannot accidentally use it.
419 void setInstructionSubclassData(unsigned short D) {
420 Instruction::setInstructionSubclassData(D);
421 }
422
423 /// The synchronization scope ID of this store instruction. Not quite enough
424 /// room in SubClassData for everything, so synchronization scope ID gets its
425 /// own field.
426 SyncScope::ID SSID;
427};
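
StoreInst fixes its operand order as shown above: operand 0 is the stored value and operand 1 is the address. A small sketch relying only on those accessors; the helper name is hypothetical.

// Hedged example: comparing the address operands (operand 1) of two stores.
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool storeToSameAddress(const StoreInst *A, const StoreInst *B) {
  return A->getPointerOperand() == B->getPointerOperand();
}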
428
429template <>
430struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
431};
432
433DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { ((i_nocapture < OperandTraits
<StoreInst>::operands(this) && "getOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 433, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<StoreInst>::op_begin(const_cast<StoreInst
*>(this))[i_nocapture].get()); } void StoreInst::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { ((i_nocapture <
OperandTraits<StoreInst>::operands(this) && "setOperand() out of range!"
) ? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 433, __PRETTY_FUNCTION__)); OperandTraits<StoreInst>::
op_begin(this)[i_nocapture] = Val_nocapture; } unsigned StoreInst
::getNumOperands() const { return OperandTraits<StoreInst>
::operands(this); } template <int Idx_nocapture> Use &
StoreInst::Op() { return this->OpFrom<Idx_nocapture>
(this); } template <int Idx_nocapture> const Use &StoreInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
434
435//===----------------------------------------------------------------------===//
436// FenceInst Class
437//===----------------------------------------------------------------------===//
438
439/// An instruction for ordering other memory operations.
440class FenceInst : public Instruction {
441 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
442
443protected:
444 // Note: Instruction needs to be a friend here to call cloneImpl.
445 friend class Instruction;
446
447 FenceInst *cloneImpl() const;
448
449public:
450 // Ordering may only be Acquire, Release, AcquireRelease, or
451 // SequentiallyConsistent.
452 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
453 SyncScope::ID SSID = SyncScope::System,
454 Instruction *InsertBefore = nullptr);
455 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
456 BasicBlock *InsertAtEnd);
457
458 // allocate space for exactly zero operands
459 void *operator new(size_t s) {
460 return User::operator new(s, 0);
461 }
462
463 /// Returns the ordering constraint of this fence instruction.
464 AtomicOrdering getOrdering() const {
465 return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
466 }
467
468 /// Sets the ordering constraint of this fence instruction. May only be
469 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
470 void setOrdering(AtomicOrdering Ordering) {
471 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
472 ((unsigned)Ordering << 1));
473 }
474
475 /// Returns the synchronization scope ID of this fence instruction.
476 SyncScope::ID getSyncScopeID() const {
477 return SSID;
478 }
479
480 /// Sets the synchronization scope ID of this fence instruction.
481 void setSyncScopeID(SyncScope::ID SSID) {
482 this->SSID = SSID;
483 }
484
485 // Methods for support type inquiry through isa, cast, and dyn_cast:
486 static bool classof(const Instruction *I) {
487 return I->getOpcode() == Instruction::Fence;
488 }
489 static bool classof(const Value *V) {
490 return isa<Instruction>(V) && classof(cast<Instruction>(V));
491 }
492
493private:
494 // Shadow Instruction::setInstructionSubclassData with a private forwarding
495 // method so that subclasses cannot accidentally use it.
496 void setInstructionSubclassData(unsigned short D) {
497 Instruction::setInstructionSubclassData(D);
498 }
499
500 /// The synchronization scope ID of this fence instruction. Not quite enough
501 /// room in SubClassData for everything, so synchronization scope ID gets its
502 /// own field.
503 SyncScope::ID SSID;
504};
505
506//===----------------------------------------------------------------------===//
507// AtomicCmpXchgInst Class
508//===----------------------------------------------------------------------===//
509
510/// an instruction that atomically checks whether a
511/// specified value is in a memory location, and, if it is, stores a new value
512/// there. Returns the value that was loaded.
513///
514class AtomicCmpXchgInst : public Instruction {
515 void Init(Value *Ptr, Value *Cmp, Value *NewVal,
516 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
517 SyncScope::ID SSID);
518
519protected:
520 // Note: Instruction needs to be a friend here to call cloneImpl.
521 friend class Instruction;
522
523 AtomicCmpXchgInst *cloneImpl() const;
524
525public:
526 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
527 AtomicOrdering SuccessOrdering,
528 AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
530 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
531 AtomicOrdering SuccessOrdering,
532 AtomicOrdering FailureOrdering,
533 SyncScope::ID SSID, BasicBlock *InsertAtEnd);
534
535 // allocate space for exactly three operands
536 void *operator new(size_t s) {
537 return User::operator new(s, 3);
538 }
539
540 /// Return true if this is a cmpxchg from a volatile memory
541 /// location.
542 ///
543 bool isVolatile() const {
544 return getSubclassDataFromInstruction() & 1;
545 }
546
547 /// Specify whether this is a volatile cmpxchg.
548 ///
549 void setVolatile(bool V) {
550 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
551 (unsigned)V);
552 }
553
554 /// Return true if this cmpxchg may spuriously fail.
555 bool isWeak() const {
556 return getSubclassDataFromInstruction() & 0x100;
557 }
558
559 void setWeak(bool IsWeak) {
560 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
561 (IsWeak << 8));
562 }
563
564 /// Transparently provide more efficient getOperand methods.
565 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
566
567 /// Returns the success ordering constraint of this cmpxchg instruction.
568 AtomicOrdering getSuccessOrdering() const {
569 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
570 }
571
572 /// Sets the success ordering constraint of this cmpxchg instruction.
573 void setSuccessOrdering(AtomicOrdering Ordering) {
574 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 575, __PRETTY_FUNCTION__))
575 "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 575, __PRETTY_FUNCTION__))
;
576 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
577 ((unsigned)Ordering << 2));
578 }
579
580 /// Returns the failure ordering constraint of this cmpxchg instruction.
581 AtomicOrdering getFailureOrdering() const {
582 return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
583 }
584
585 /// Sets the failure ordering constraint of this cmpxchg instruction.
586 void setFailureOrdering(AtomicOrdering Ordering) {
587 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 588, __PRETTY_FUNCTION__))
588 "CmpXchg instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"CmpXchg instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 588, __PRETTY_FUNCTION__))
;
589 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
590 ((unsigned)Ordering << 5));
591 }
592
593 /// Returns the synchronization scope ID of this cmpxchg instruction.
594 SyncScope::ID getSyncScopeID() const {
595 return SSID;
596 }
597
598 /// Sets the synchronization scope ID of this cmpxchg instruction.
599 void setSyncScopeID(SyncScope::ID SSID) {
600 this->SSID = SSID;
601 }
602
603 Value *getPointerOperand() { return getOperand(0); }
604 const Value *getPointerOperand() const { return getOperand(0); }
605 static unsigned getPointerOperandIndex() { return 0U; }
606
607 Value *getCompareOperand() { return getOperand(1); }
608 const Value *getCompareOperand() const { return getOperand(1); }
609
610 Value *getNewValOperand() { return getOperand(2); }
611 const Value *getNewValOperand() const { return getOperand(2); }
612
613 /// Returns the address space of the pointer operand.
614 unsigned getPointerAddressSpace() const {
615 return getPointerOperand()->getType()->getPointerAddressSpace();
616 }
617
618 /// Returns the strongest permitted ordering on failure, given the
619 /// desired ordering on success.
620 ///
621 /// If the comparison in a cmpxchg operation fails, there is no atomic store
622 /// so release semantics cannot be provided. So this function drops explicit
623 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
624 /// operation would remain SequentiallyConsistent.
625 static AtomicOrdering
626 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
627 switch (SuccessOrdering) {
628 default:
629 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 629)
;
630 case AtomicOrdering::Release:
631 case AtomicOrdering::Monotonic:
632 return AtomicOrdering::Monotonic;
633 case AtomicOrdering::AcquireRelease:
634 case AtomicOrdering::Acquire:
635 return AtomicOrdering::Acquire;
636 case AtomicOrdering::SequentiallyConsistent:
637 return AtomicOrdering::SequentiallyConsistent;
638 }
639 }
640
641 // Methods for support type inquiry through isa, cast, and dyn_cast:
642 static bool classof(const Instruction *I) {
643 return I->getOpcode() == Instruction::AtomicCmpXchg;
644 }
645 static bool classof(const Value *V) {
646 return isa<Instruction>(V) && classof(cast<Instruction>(V));
647 }
648
649private:
650 // Shadow Instruction::setInstructionSubclassData with a private forwarding
651 // method so that subclasses cannot accidentally use it.
652 void setInstructionSubclassData(unsigned short D) {
653 Instruction::setInstructionSubclassData(D);
654 }
655
656 /// The synchronization scope ID of this cmpxchg instruction. Not quite
657 /// enough room in SubClassData for everything, so synchronization scope ID
658 /// gets its own field.
659 SyncScope::ID SSID;
660};
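
The switch in getStrongestFailureOrdering() above drops release semantics from the requested success ordering, since a failed cmpxchg performs no store. A sketch that spells the mapping out; the helper is hypothetical.

// Hedged example: the success-to-failure ordering mapping shown above.
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

static void checkFailureOrderings() {   // hypothetical helper
  using AO = AtomicOrdering;
  assert(AtomicCmpXchgInst::getStrongestFailureOrdering(AO::Release) ==
         AO::Monotonic);                // a failed exchange cannot release
  assert(AtomicCmpXchgInst::getStrongestFailureOrdering(AO::AcquireRelease) ==
         AO::Acquire);
  assert(AtomicCmpXchgInst::getStrongestFailureOrdering(
             AO::SequentiallyConsistent) == AO::SequentiallyConsistent);
}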
661
662template <>
663struct OperandTraits<AtomicCmpXchgInst> :
664 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
665};
666
667DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { ((i_nocapture < OperandTraits<AtomicCmpXchgInst
>::operands(this) && "getOperand() out of range!")
? static_cast<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 667, __PRETTY_FUNCTION__)); return cast_or_null<Value>
( OperandTraits<AtomicCmpXchgInst>::op_begin(const_cast
<AtomicCmpXchgInst*>(this))[i_nocapture].get()); } void
AtomicCmpXchgInst::setOperand(unsigned i_nocapture, Value *Val_nocapture
) { ((i_nocapture < OperandTraits<AtomicCmpXchgInst>
::operands(this) && "setOperand() out of range!") ? static_cast
<void> (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 667, __PRETTY_FUNCTION__)); OperandTraits<AtomicCmpXchgInst
>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
AtomicCmpXchgInst::getNumOperands() const { return OperandTraits
<AtomicCmpXchgInst>::operands(this); } template <int
Idx_nocapture> Use &AtomicCmpXchgInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &AtomicCmpXchgInst::Op() const { return this
->OpFrom<Idx_nocapture>(this); }
668
669//===----------------------------------------------------------------------===//
670// AtomicRMWInst Class
671//===----------------------------------------------------------------------===//
672
673/// an instruction that atomically reads a memory location,
674/// combines it with another value, and then stores the result back. Returns
675/// the old value.
676///
677class AtomicRMWInst : public Instruction {
678protected:
679 // Note: Instruction needs to be a friend here to call cloneImpl.
680 friend class Instruction;
681
682 AtomicRMWInst *cloneImpl() const;
683
684public:
685 /// This enumeration lists the possible modifications atomicrmw can make. In
686 /// the descriptions, 'p' is the pointer to the instruction's memory location,
687 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
688 /// instruction. These instructions always return 'old'.
689 enum BinOp {
690 /// *p = v
691 Xchg,
692 /// *p = old + v
693 Add,
694 /// *p = old - v
695 Sub,
696 /// *p = old & v
697 And,
698 /// *p = ~(old & v)
699 Nand,
700 /// *p = old | v
701 Or,
702 /// *p = old ^ v
703 Xor,
704 /// *p = old >signed v ? old : v
705 Max,
706 /// *p = old <signed v ? old : v
707 Min,
708 /// *p = old >unsigned v ? old : v
709 UMax,
710 /// *p = old <unsigned v ? old : v
711 UMin,
712
713 FIRST_BINOP = Xchg,
714 LAST_BINOP = UMin,
715 BAD_BINOP
716 };
717
718 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
719 AtomicOrdering Ordering, SyncScope::ID SSID,
720 Instruction *InsertBefore = nullptr);
721 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
722 AtomicOrdering Ordering, SyncScope::ID SSID,
723 BasicBlock *InsertAtEnd);
724
725 // allocate space for exactly two operands
726 void *operator new(size_t s) {
727 return User::operator new(s, 2);
728 }
729
730 BinOp getOperation() const {
731 return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
732 }
733
734 void setOperation(BinOp Operation) {
735 unsigned short SubclassData = getSubclassDataFromInstruction();
736 setInstructionSubclassData((SubclassData & 31) |
737 (Operation << 5));
738 }
739
740 /// Return true if this is a RMW on a volatile memory location.
741 ///
742 bool isVolatile() const {
743 return getSubclassDataFromInstruction() & 1;
744 }
745
746 /// Specify whether this is a volatile RMW or not.
747 ///
748 void setVolatile(bool V) {
749 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
750 (unsigned)V);
751 }
752
753 /// Transparently provide more efficient getOperand methods.
754 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
755
756 /// Returns the ordering constraint of this rmw instruction.
757 AtomicOrdering getOrdering() const {
758 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
759 }
760
761 /// Sets the ordering constraint of this rmw instruction.
762 void setOrdering(AtomicOrdering Ordering) {
763 assert(Ordering != AtomicOrdering::NotAtomic &&((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 764, __PRETTY_FUNCTION__))
764 "atomicrmw instructions can only be atomic.")((Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."
) ? static_cast<void> (0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-6.0~svn318211/include/llvm/IR/Instructions.h"
, 764, __PRETTY_FUNCTION__))
;
765 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
766 ((unsigned)Ordering << 2));
767 }
768
769 /// Returns the synchronization scope ID of this rmw instruction.
770 SyncScope::ID getSyncScopeID() const {
771 return SSID;
772 }
773
774 /// Sets the synchronization scope ID of this rmw instruction.
775 void setSyncScopeID(SyncScope::ID SSID) {
776 this->SSID = SSID;
777 }
778
779 Value *getPointerOperand() { return getOperand(0); }
780 const Value *getPointerOperand() const { return getOperand(0); }
781 static unsigned getPointerOperandIndex() { return 0U; }
782
783 Value *getValOperand() { return getOperand(1); }
784 const Value *getValOperand() const { return getOperand(1); }
785
786 /// Returns the address space of the pointer operand.
787 unsigned getPointerAddressSpace() const {
788 return getPointerOperand()->getType()->getPointerAddressSpace();
789 }
790
791 // Methods for support type inquiry through isa, cast, and dyn_cast:
792 static bool classof(const Instruction *I) {
793 return I->getOpcode() == Instruction::AtomicRMW;
794 }
795 static bool classof(const Value *V) {
796 return isa<Instruction>(V) && classof(cast<Instruction>(V));
797 }
798
799private:
800 void Init(BinOp Operation, Value *Ptr, Value *Val,
801 AtomicOrdering Ordering, SyncScope::ID SSID);
802
803 // Shadow Instruction::setInstructionSubclassData with a private forwarding
804 // method so that subclasses cannot accidentally use it.
805 void setInstructionSubclassData(unsigned short D) {
806 Instruction::setInstructionSubclassData(D);
807 }
808
809 /// The synchronization scope ID of this rmw instruction. Not quite enough
810 /// room in SubClassData for everything, so synchronization scope ID gets its
811 /// own field.
812 SyncScope::ID SSID;
813};
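
The BinOp descriptions above all follow the same pattern: read the old value, combine it with the operand, write the result back, and return the old value. A standalone sketch of that pattern for the Nand operation, emulated on a plain integer rather than an IR memory location:

// Hedged example: the 'returns old' semantics described for BinOp above.
#include <cassert>

static unsigned rmwNand(unsigned &P, unsigned V) {   // hypothetical helper
  unsigned Old = P;
  P = ~(Old & V);      // Nand: *p = ~(old & v)
  return Old;          // the instruction always yields the old value
}

int main() {
  unsigned Mem = 0xF0u;
  unsigned Old = rmwNand(Mem, 0x3Cu);
  assert(Old == 0xF0u);
  assert(Mem == ~(0xF0u & 0x3Cu));
  return 0;
}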
814
815template <>
816struct OperandTraits<AtomicRMWInst>
817 : public FixedNumOperandTraits<AtomicRMWInst,2> {
818};
819
820DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
821
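// --- Editor's illustrative sketch (not part of Instructions.h) -------------
// A minimal example of how the AtomicRMWInst accessors above are typically
// used, assuming an LLVM 6.0-era build. `Builder`, `Ptr` and `Val` are
// placeholders supplied by the caller; the exact IRBuilder overload is an
// assumption of this sketch rather than something stated in this report.
#include <cassert>
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

AtomicRMWInst *emitAtomicAdd(IRBuilder<> &Builder, Value *Ptr, Value *Val) {
  // Emit `atomicrmw add` with seq_cst ordering in the default (system) scope.
  AtomicRMWInst *RMW = Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, Ptr, Val, AtomicOrdering::SequentiallyConsistent);
  // The subclass-data accessors shown above round-trip the ordering, and the
  // synchronization scope ID lives in its own field.
  assert(RMW->getOrdering() == AtomicOrdering::SequentiallyConsistent);
  RMW->setSyncScopeID(SyncScope::SingleThread);
  return RMW;
}
// ---------------------------------------------------------------------------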
822//===----------------------------------------------------------------------===//
823// GetElementPtrInst Class
824//===----------------------------------------------------------------------===//
825
826// checkGEPType - Simple wrapper function to give a better assertion failure
827// message on bad indexes for a gep instruction.
828//
829inline Type *checkGEPType(Type *Ty) {
830 assert(Ty && "Invalid GetElementPtrInst indices for type!");
831 return Ty;
832}
833
834/// An instruction for type-safe pointer arithmetic to
835/// access elements of arrays and structs.
836///
837class GetElementPtrInst : public Instruction {
838 Type *SourceElementType;
839 Type *ResultElementType;
840
841 GetElementPtrInst(const GetElementPtrInst &GEPI);
842
843 /// Constructors - Create a getelementptr instruction with a base pointer and
844 /// a list of indices. The first ctor can optionally insert before an existing
845 /// instruction, the second appends the new instruction to the specified
846 /// BasicBlock.
847 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
848 ArrayRef<Value *> IdxList, unsigned Values,
849 const Twine &NameStr, Instruction *InsertBefore);
850 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
851 ArrayRef<Value *> IdxList, unsigned Values,
852 const Twine &NameStr, BasicBlock *InsertAtEnd);
853
854 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
855
856protected:
857 // Note: Instruction needs to be a friend here to call cloneImpl.
858 friend class Instruction;
859
860 GetElementPtrInst *cloneImpl() const;
861
862public:
863 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
864 ArrayRef<Value *> IdxList,
865 const Twine &NameStr = "",
866 Instruction *InsertBefore = nullptr) {
867 unsigned Values = 1 + unsigned(IdxList.size());
868 if (!PointeeType)
869 PointeeType =
870 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
871 else
872 assert(
873 PointeeType ==
874 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
875 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
876 NameStr, InsertBefore);
877 }
878
879 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
880 ArrayRef<Value *> IdxList,
881 const Twine &NameStr,
882 BasicBlock *InsertAtEnd) {
883 unsigned Values = 1 + unsigned(IdxList.size());
884 if (!PointeeType)
885 PointeeType =
886 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
887 else
888 assert(
889 PointeeType ==
890 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
891 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
892 NameStr, InsertAtEnd);
893 }
894
895 /// Create an "inbounds" getelementptr. See the documentation for the
896 /// "inbounds" flag in LangRef.html for details.
897 static GetElementPtrInst *CreateInBounds(Value *Ptr,
898 ArrayRef<Value *> IdxList,
899 const Twine &NameStr = "",
900 Instruction *InsertBefore = nullptr){
901 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
902 }
903
904 static GetElementPtrInst *
905 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
906 const Twine &NameStr = "",
907 Instruction *InsertBefore = nullptr) {
908 GetElementPtrInst *GEP =
909 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
910 GEP->setIsInBounds(true);
911 return GEP;
912 }
913
914 static GetElementPtrInst *CreateInBounds(Value *Ptr,
915 ArrayRef<Value *> IdxList,
916 const Twine &NameStr,
917 BasicBlock *InsertAtEnd) {
918 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
919 }
920
921 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
922 ArrayRef<Value *> IdxList,
923 const Twine &NameStr,
924 BasicBlock *InsertAtEnd) {
925 GetElementPtrInst *GEP =
926 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
927 GEP->setIsInBounds(true);
928 return GEP;
929 }
930
931 /// Transparently provide more efficient getOperand methods.
932 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
933
934 Type *getSourceElementType() const { return SourceElementType; }
935
936 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
937 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
938
939 Type *getResultElementType() const {
940 assert(ResultElementType ==
941 cast<PointerType>(getType()->getScalarType())->getElementType());
942 return ResultElementType;
943 }
944
945 /// Returns the address space of this instruction's pointer type.
946 unsigned getAddressSpace() const {
947 // Note that this is always the same as the pointer operand's address space
948 // and that is cheaper to compute, so cheat here.
949 return getPointerAddressSpace();
950 }
951
952 /// Returns the type of the element that would be loaded with
953 /// a load instruction with the specified parameters.
954 ///
955 /// Null is returned if the indices are invalid for the specified
956 /// pointer type.
957 ///
958 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
959 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
960 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
961
962 inline op_iterator idx_begin() { return op_begin()+1; }
963 inline const_op_iterator idx_begin() const { return op_begin()+1; }
964 inline op_iterator idx_end() { return op_end(); }
965 inline const_op_iterator idx_end() const { return op_end(); }
966
967 inline iterator_range<op_iterator> indices() {
968 return make_range(idx_begin(), idx_end());
969 }
970
971 inline iterator_range<const_op_iterator> indices() const {
972 return make_range(idx_begin(), idx_end());
973 }
974
975 Value *getPointerOperand() {
976 return getOperand(0);
977 }
978 const Value *getPointerOperand() const {
979 return getOperand(0);
980 }
981 static unsigned getPointerOperandIndex() {
982 return 0U; // get index for modifying correct operand.
983 }
984
985 /// Method to return the pointer operand as a
986 /// PointerType.
987 Type *getPointerOperandType() const {
988 return getPointerOperand()->getType();
989 }
990
991 /// Returns the address space of the pointer operand.
992 unsigned getPointerAddressSpace() const {
993 return getPointerOperandType()->getPointerAddressSpace();
994 }
995
996 /// Returns the pointer type returned by the GEP
997 /// instruction, which may be a vector of pointers.
998 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
999 return getGEPReturnType(
1000 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
1001 Ptr, IdxList);
1002 }
1003 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1004 ArrayRef<Value *> IdxList) {
1005 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1006 Ptr->getType()->getPointerAddressSpace());
1007 // Vector GEP
1008 if (Ptr->getType()->isVectorTy()) {
1009 unsigned NumElem = Ptr->getType()->getVectorNumElements();
1010 return VectorType::get(PtrTy, NumElem);
1011 }
1012 for (Value *Index : IdxList)
1013 if (Index->getType()->isVectorTy()) {
1014 unsigned NumElem = Index->getType()->getVectorNumElements();
1015 return VectorType::get(PtrTy, NumElem);
1016 }
1017 // Scalar GEP
1018 return PtrTy;
1019 }
1020
1021 unsigned getNumIndices() const { // Note: always non-negative
1022 return getNumOperands() - 1;
1023 }
1024
1025 bool hasIndices() const {
1026 return getNumOperands() > 1;
1027 }
1028
1029 /// Return true if all of the indices of this GEP are
1030 /// zeros. If so, the result pointer and the first operand have the same
1031 /// value, just potentially different types.
1032 bool hasAllZeroIndices() const;
1033
1034 /// Return true if all of the indices of this GEP are
1035 /// constant integers. If so, the result pointer and the first operand have
1036 /// a constant offset between them.
1037 bool hasAllConstantIndices() const;
1038
1039 /// Set or clear the inbounds flag on this GEP instruction.
1040 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1041 void setIsInBounds(bool b = true);
1042
1043 /// Determine whether the GEP has the inbounds flag.
1044 bool isInBounds() const;
1045
1046 /// Accumulate the constant address offset of this GEP if possible.
1047 ///
1048 /// This routine accepts an APInt into which it will accumulate the constant
1049 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1050 /// all-constant, it returns false and the value of the offset APInt is
1051 /// undefined (it is *not* preserved!). The APInt passed into this routine
1052 /// must be at least as wide as the IntPtr type for the address space of
1053 /// the base GEP pointer.
1054 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1055
1056 // Methods for support type inquiry through isa, cast, and dyn_cast:
1057 static bool classof(const Instruction *I) {
1058 return (I->getOpcode() == Instruction::GetElementPtr);
1059 }
1060 static bool classof(const Value *V) {
1061 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1062 }
1063};
1064
1065template <>
1066struct OperandTraits<GetElementPtrInst> :
1067 public VariadicOperandTraits<GetElementPtrInst, 1> {
1068};
1069
1070GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1071 ArrayRef<Value *> IdxList, unsigned Values,
1072 const Twine &NameStr,
1073 Instruction *InsertBefore)
1074 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1075 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1076 Values, InsertBefore),
1077 SourceElementType(PointeeType),
1078 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1079 assert(ResultElementType ==
1080 cast<PointerType>(getType()->getScalarType())->getElementType());
1081 init(Ptr, IdxList, NameStr);
1082}
1083
1084GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1085 ArrayRef<Value *> IdxList, unsigned Values,
1086 const Twine &NameStr,
1087 BasicBlock *InsertAtEnd)
1088 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1089 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1090 Values, InsertAtEnd),
1091 SourceElementType(PointeeType),
1092 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1093 assert(ResultElementType ==
1094 cast<PointerType>(getType()->getScalarType())->getElementType());
1095 init(Ptr, IdxList, NameStr);
1096}
1097
1098DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1099
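// --- Editor's illustrative sketch (not part of Instructions.h) -------------
// A hedged example of the GetElementPtrInst factories and index helpers
// documented above, written against the LLVM 6.0-era typed-pointer API.
// `M`, `Builder`, `BasePtr` and `STy` are assumed to be supplied by the
// caller; the field index (1) is made up for illustration.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

GetElementPtrInst *emitFieldAddr(Module &M, IRBuilder<> &Builder,
                                 Value *BasePtr, StructType *STy) {
  LLVMContext &Ctx = M.getContext();
  Value *Idx[] = {ConstantInt::get(Type::getInt32Ty(Ctx), 0),
                  ConstantInt::get(Type::getInt32Ty(Ctx), 1)};
  // `getelementptr inbounds %STy, %STy* %BasePtr, i32 0, i32 1`.
  GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
      STy, BasePtr, Idx, "field.addr", /*InsertBefore=*/nullptr);
  Builder.Insert(GEP);

  // Both indices are constant integers, so the byte offset can be folded
  // into an APInt at least as wide as the pointer, per the contract of
  // accumulateConstantOffset() above.
  const DataLayout &DL = M.getDataLayout();
  APInt Offset(DL.getPointerSizeInBits(GEP->getPointerAddressSpace()), 0);
  if (GEP->accumulateConstantOffset(DL, Offset))
    (void)Offset; // constant byte offset of field #1 inside STy
  return GEP;
}
// ---------------------------------------------------------------------------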
1100//===----------------------------------------------------------------------===//
1101// ICmpInst Class
1102//===----------------------------------------------------------------------===//
1103
1104/// This instruction compares its operands according to the predicate given
1105/// to the constructor. It only operates on integers or pointers. The operands
1106/// must be identical types.
1107/// Represent an integer comparison operator.
1108class ICmpInst: public CmpInst {
1109 void AssertOK() {
1110 assert(isIntPredicate() &&
1111 "Invalid ICmp predicate value");
1112 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1113 "Both operands to ICmp instruction are not of the same type!");
1114 // Check that the operands are the right type
1115 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1116 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1117 "Invalid operand types for ICmp instruction");
1118 }
1119
1120protected:
1121 // Note: Instruction needs to be a friend here to call cloneImpl.
1122 friend class Instruction;
1123
1124 /// Clone an identical ICmpInst
1125 ICmpInst *cloneImpl() const;
1126
1127public:
1128 /// Constructor with insert-before-instruction semantics.
1129 ICmpInst(
1130 Instruction *InsertBefore, ///< Where to insert
1131 Predicate pred, ///< The predicate to use for the comparison
1132 Value *LHS, ///< The left-hand-side of the expression
1133 Value *RHS, ///< The right-hand-side of the expression
1134 const Twine &NameStr = "" ///< Name of the instruction
1135 ) : CmpInst(makeCmpResultType(LHS->getType()),
1136 Instruction::ICmp, pred, LHS, RHS, NameStr,
1137 InsertBefore) {
1138#ifndef NDEBUG
1139 AssertOK();
1140#endif
1141 }
1142
1143 /// Constructor with insert-at-end semantics.
1144 ICmpInst(
1145 BasicBlock &InsertAtEnd, ///< Block to insert into.
1146 Predicate pred, ///< The predicate to use for the comparison
1147 Value *LHS, ///< The left-hand-side of the expression
1148 Value *RHS, ///< The right-hand-side of the expression
1149 const Twine &NameStr = "" ///< Name of the instruction
1150 ) : CmpInst(makeCmpResultType(LHS->getType()),
1151 Instruction::ICmp, pred, LHS, RHS, NameStr,
1152 &InsertAtEnd) {
1153#ifndef NDEBUG
1154 AssertOK();
1155#endif
1156 }
1157
1158 /// Constructor with no-insertion semantics
1159 ICmpInst(
1160 Predicate pred, ///< The predicate to use for the comparison
1161 Value *LHS, ///< The left-hand-side of the expression
1162 Value *RHS, ///< The right-hand-side of the expression
1163 const Twine &NameStr = "" ///< Name of the instruction
1164 ) : CmpInst(makeCmpResultType(LHS->getType()),
1165 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1166#ifndef NDEBUG
1167 AssertOK();
1168#endif
1169 }
1170
1171 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1172 /// @returns the predicate that would be the result if the operand were
1173 /// regarded as signed.
1174 /// Return the signed version of the predicate
1175 Predicate getSignedPredicate() const {
1176 return getSignedPredicate(getPredicate());
1177 }
1178
1179 /// This is a static version that you can use without an instruction.
1180 /// Return the signed version of the predicate.
1181 static Predicate getSignedPredicate(Predicate pred);
1182
1183 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1184 /// @returns the predicate that would be the result if the operand were
1185 /// regarded as unsigned.
1186 /// Return the unsigned version of the predicate
1187 Predicate getUnsignedPredicate() const {
1188 return getUnsignedPredicate(getPredicate());
1189 }
1190
1191 /// This is a static version that you can use without an instruction.
1192 /// Return the unsigned version of the predicate.
1193 static Predicate getUnsignedPredicate(Predicate pred);
1194
1195 /// Return true if this predicate is either EQ or NE. This also
1196 /// tests for commutativity.
1197 static bool isEquality(Predicate P) {
1198 return P == ICMP_EQ || P == ICMP_NE;
1199 }
1200
1201 /// Return true if this predicate is either EQ or NE. This also
1202 /// tests for commutativity.
1203 bool isEquality() const {
1204 return isEquality(getPredicate());
1205 }
1206
1207 /// @returns true if the predicate of this ICmpInst is commutative
1208 /// Determine if this relation is commutative.
1209 bool isCommutative() const { return isEquality(); }
1210
1211 /// Return true if the predicate is relational (not EQ or NE).
1212 ///
1213 bool isRelational() const {
1214 return !isEquality();
1215 }
1216
1217 /// Return true if the predicate is relational (not EQ or NE).
1218 ///
1219 static bool isRelational(Predicate P) {
1220 return !isEquality(P);
1221 }
1222
1223 /// Exchange the two operands to this instruction in such a way that it does
1224 /// not modify the semantics of the instruction. The predicate value may be
1225 /// changed to retain the same result if the predicate is order dependent
1226 /// (e.g. ult).
1227 /// Swap operands and adjust predicate.
1228 void swapOperands() {
1229 setPredicate(getSwappedPredicate());
1230 Op<0>().swap(Op<1>());
1231 }
1232
1233 // Methods for support type inquiry through isa, cast, and dyn_cast:
1234 static bool classof(const Instruction *I) {
1235 return I->getOpcode() == Instruction::ICmp;
125. Calling 'Instruction::getOpcode'
128. Returning from 'Instruction::getOpcode'
129. Assuming the condition is true
502. Calling 'Instruction::getOpcode'
505. Returning from 'Instruction::getOpcode'
506. Assuming the condition is true
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
29. Calling 'isa'
38. Returning from 'isa'
39. Assuming the condition is false
113. Calling 'isa'
122. Returning from 'isa'
123. Assuming the condition is true
124. Calling 'ICmpInst::classof'
130. Returning from 'ICmpInst::classof'
490. Calling 'isa'
499. Returning from 'isa'
500. Assuming the condition is true
501. Calling 'ICmpInst::classof'
507. Returning from 'ICmpInst::classof'
1239 }
1240};
1241
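// --- Editor's illustrative sketch (not part of Instructions.h) -------------
// A small example of the ICmpInst predicate helpers above. The function is
// hypothetical: it assumes the caller has already proven both operands
// non-negative, so an unsigned relational compare can be rewritten as the
// corresponding signed one.
#include "llvm/IR/Instructions.h"
using namespace llvm;

void canonicalizeToSigned(ICmpInst *Cmp) {
  // eq/ne are sign-agnostic (and commutative), so leave them alone.
  if (Cmp->isEquality())
    return;
  // For relational predicates, getSignedPredicate() maps e.g. ULT -> SLT
  // while leaving already-signed predicates unchanged.
  Cmp->setPredicate(Cmp->getSignedPredicate());
}
// ---------------------------------------------------------------------------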
1242//===----------------------------------------------------------------------===//
1243// FCmpInst Class
1244//===----------------------------------------------------------------------===//
1245
1246/// This instruction compares its operands according to the predicate given
1247/// to the constructor. It only operates on floating point values or packed
1248/// vectors of floating point values. The operands must be identical types.
1249/// Represents a floating point comparison operator.
1250class FCmpInst: public CmpInst {
1251 void AssertOK() {
1252 assert(isFPPredicate() && "Invalid FCmp predicate value");
1253 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1254 "Both operands to FCmp instruction are not of the same type!");
1255 // Check that the operands are the right type
1256 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1257 "Invalid operand types for FCmp instruction");
1258 }
1259
1260protected:
1261 // Note: Instruction needs to be a friend here to call cloneImpl.
1262 friend class Instruction;
1263
1264 /// Clone an identical FCmpInst
1265 FCmpInst *cloneImpl() const;
1266
1267public:
1268 /// Constructor with insert-before-instruction semantics.
1269 FCmpInst(
1270 Instruction *InsertBefore, ///< Where to insert
1271 Predicate pred, ///< The predicate to use for the comparison
1272 Value *LHS, ///< The left-hand-side of the expression
1273 Value *RHS, ///< The right-hand-side of the expression
1274 const Twine &NameStr = "" ///< Name of the instruction
1275 ) : CmpInst(makeCmpResultType(LHS->getType()),
1276 Instruction::FCmp, pred, LHS, RHS, NameStr,
1277 InsertBefore) {
1278 AssertOK();
1279 }
1280
1281 /// Constructor with insert-at-end semantics.
1282 FCmpInst(
1283 BasicBlock &InsertAtEnd, ///< Block to insert into.
1284 Predicate pred, ///< The predicate to use for the comparison
1285 Value *LHS, ///< The left-hand-side of the expression
1286 Value *RHS, ///< The right-hand-side of the expression
1287 const Twine &NameStr = "" ///< Name of the instruction
1288 ) : CmpInst(makeCmpResultType(LHS->getType()),
1289 Instruction::FCmp, pred, LHS, RHS, NameStr,
1290 &InsertAtEnd) {
1291 AssertOK();
1292 }
1293
1294 /// Constructor with no-insertion semantics
1295 FCmpInst(
1296 Predicate pred, ///< The predicate to use for the comparison
1297 Value *LHS, ///< The left-hand-side of the expression
1298 Value *RHS, ///< The right-hand-side of the expression
1299 const Twine &NameStr = "" ///< Name of the instruction
1300 ) : CmpInst(makeCmpResultType(LHS->getType()),
1301 Instruction::FCmp, pred, LHS, RHS, NameStr) {
1302 AssertOK();
1303 }
1304
1305 /// @returns true if the predicate of this instruction is EQ or NE.
1306 /// Determine if this is an equality predicate.
1307 static bool isEquality(Predicate Pred) {
1308 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1309 Pred == FCMP_UNE;
1310 }
1311
1312 /// @returns true if the predicate of this instruction is EQ or NE.
1313 /// Determine if this is an equality predicate.
1314 bool isEquality() const { return isEquality(getPredicate()); }
1315
1316 /// @returns true if the predicate of this instruction is commutative.
1317 /// Determine if this is a commutative predicate.
1318 bool isCommutative() const {
1319 return isEquality() ||
1320 getPredicate() == FCMP_FALSE ||
1321 getPredicate() == FCMP_TRUE ||
1322 getPredicate() == FCMP_ORD ||
1323 getPredicate() == FCMP_UNO;
1324 }
1325
1326 /// @returns true if the predicate is relational (not EQ or NE).
1327 /// Determine if this a relational predicate.
1328 bool isRelational() const { return !isEquality(); }
1329
1330 /// Exchange the two operands to this instruction in such a way that it does
1331 /// not modify the semantics of the instruction. The predicate value may be
1332 /// changed to retain the same result if the predicate is order dependent
1333 /// (e.g. ult).
1334 /// Swap operands and adjust predicate.
1335 void swapOperands() {
1336 setPredicate(getSwappedPredicate());
1337 Op<0>().swap(Op<1>());
1338 }
1339
1340 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1341 static bool classof(const Instruction *I) {
1342 return I->getOpcode() == Instruction::FCmp;
1343 }
1344 static bool classof(const Value *V) {
1345 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1346 }
1347};
1348
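// --- Editor's illustrative sketch (not part of Instructions.h) -------------
// A brief example of FCmpInst::swapOperands(), which exchanges the operands
// while adjusting the predicate so the result is preserved (for instance,
// `fcmp olt a, b` becomes `fcmp ogt b, a`). The helper name is made up.
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Ensure a preferred value ends up as the left-hand operand of a float compare.
void moveToLHS(FCmpInst *Cmp, Value *Preferred) {
  if (Cmp->getOperand(0) == Preferred)
    return;
  if (Cmp->getOperand(1) == Preferred)
    Cmp->swapOperands();
}
// ---------------------------------------------------------------------------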
1349//===----------------------------------------------------------------------===//
1350/// This class represents a function call, abstracting a target
1351/// machine's calling convention. This class uses the low bits of the SubClassData
1352/// field to indicate whether or not this is a tail call. The rest of the bits
1353/// hold the calling convention of the call.
1354///
1355class CallInst : public Instruction,
1356 public OperandBundleUser<CallInst, User::op_iterator> {
1357 friend class OperandBundleUser<CallInst, User::op_iterator>;
1358
1359 AttributeList Attrs; ///< parameter attributes for call
1360 FunctionType *FTy;
1361
1362 CallInst(const CallInst &CI);
1363
1364 /// Construct a CallInst given a range of arguments.
1365 /// Construct a CallInst from a range of arguments
1366 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1367 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1368 Instruction *InsertBefore);
1369
1370 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1371 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1372 Instruction *InsertBefore)
1373 : CallInst(cast<FunctionType>(
1374 cast<PointerType>(Func->getType())->getElementType()),
1375 Func, Args, Bundles, NameStr, InsertBefore) {}
1376
1377 inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
1378 Instruction *InsertBefore)
1379 : CallInst(Func, Args, None, NameStr, InsertBefore) {}
1380
1381 /// Construct a CallInst given a range of arguments.
1382 /// Construct a CallInst from a range of arguments
1383 inline CallInst(Value *Func, ArrayRef<Value *> Args,
1384 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1385 BasicBlock *InsertAtEnd);
1386
1387 explicit CallInst(Value *F, const Twine &NameStr,
1388 Instruction *InsertBefore);
1389
1390 CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
1391
1392 void init(Value *Func, ArrayRef<Value *> Args,
1393 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
1394 init(cast<FunctionType>(
1395 cast<PointerType>(Func->getType())->getElementType()),
1396 Func, Args, Bundles, NameStr);
1397 }
1398 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1399 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1400 void init(Value *Func, const Twine &NameStr);
1401
1402 bool hasDescriptor() const { return HasDescriptor; }
1403
1404protected:
1405 // Note: Instruction needs to be a friend here to call cloneImpl.
1406 friend class Instruction;
1407
1408 CallInst *cloneImpl() const;
1409
1410public:
1411 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1412 ArrayRef<OperandBundleDef> Bundles = None,
1413 const Twine &NameStr = "",
1414 Instruction *InsertBefore = nullptr) {
1415 return Create(cast<FunctionType>(
1416 cast<PointerType>(Func->getType())->getElementType()),
1417 Func, Args, Bundles, NameStr, InsertBefore);
1418 }
1419
1420 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1421 const Twine &NameStr,
1422 Instruction *InsertBefore = nullptr) {
1423 return Create(cast<FunctionType>(
1424 cast<PointerType>(Func->getType())->getElementType()),
1425 Func, Args, None, NameStr, InsertBefore);
1426 }
1427
1428 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1429 const Twine &NameStr,
1430 Instruction *InsertBefore = nullptr) {
1431 return new (unsigned(Args.size() + 1))
1432 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1433 }
1434
1435 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1436 ArrayRef<OperandBundleDef> Bundles = None,
1437 const Twine &NameStr = "",
1438 Instruction *InsertBefore = nullptr) {
1439 const unsigned TotalOps =
1440 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1441 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1442
1443 return new (TotalOps, DescriptorBytes)
1444 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1445 }
1446
1447 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1448 ArrayRef<OperandBundleDef> Bundles,
1449 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1450 const unsigned TotalOps =
1451 unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
1452 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1453
1454 return new (TotalOps, DescriptorBytes)
1455 CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
1456 }
1457
1458 static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
1459 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1460 return new (unsigned(Args.size() + 1))
1461 CallInst(Func, Args, None, NameStr, InsertAtEnd);
1462 }
1463
1464 static CallInst *Create(Value *F, const Twine &NameStr = "",
1465 Instruction *InsertBefore = nullptr) {
1466 return new(1) CallInst(F, NameStr, InsertBefore);
1467 }
1468
1469 static CallInst *Create(Value *F, const Twine &NameStr,
1470 BasicBlock *InsertAtEnd) {
1471 return new(1) CallInst(F, NameStr, InsertAtEnd);
1472 }
1473
1474 /// Create a clone of \p CI with a different set of operand bundles and
1475 /// insert it before \p InsertPt.
1476 ///
1477 /// The returned call instruction is identical to \p CI in every way except that
1478 /// the operand bundles for the new instruction are set to the operand bundles
1479 /// in \p Bundles.
1480 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1481 Instruction *InsertPt = nullptr);
1482
1483 /// Generate the IR for a call to malloc:
1484 /// 1. Compute the malloc call's argument as the specified type's size,
1485 /// possibly multiplied by the array size if the array size is not
1486 /// constant 1.
1487 /// 2. Call malloc with that argument.
1488 /// 3. Bitcast the result of the malloc call to the specified type.
1489 static Instruction *CreateMalloc(Instruction *InsertBefore,
1490 Type *IntPtrTy, Type *AllocTy,
1491 Value *AllocSize, Value *ArraySize = nullptr,
1492 Function* MallocF = nullptr,
1493 const Twine &Name = "");
1494 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
1495 Type *IntPtrTy, Type *AllocTy,
1496 Value *AllocSize, Value *ArraySize = nullptr,
1497 Function* MallocF = nullptr,
1498 const Twine &Name = "");
1499 static Instruction *CreateMalloc(Instruction *InsertBefore,
1500 Type *IntPtrTy, Type *AllocTy,
1501 Value *AllocSize, Value *ArraySize = nullptr,
1502 ArrayRef<OperandBundleDef> Bundles = None,
1503 Function* MallocF = nullptr,
1504 const Twine &Name = "");
1505 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
1506 Type *IntPtrTy, Type *AllocTy,
1507 Value *AllocSize, Value *ArraySize = nullptr,
1508 ArrayRef<OperandBundleDef> Bundles = None,
1509 Function* MallocF = nullptr,
1510 const Twine &Name = "");
1511 /// Generate the IR for a call to the builtin free function.
1512 static Instruction *CreateFree(Value *Source,
1513 Instruction *InsertBefore);
1514 static Instruction *CreateFree(Value *Source,
1515 BasicBlock *InsertAtEnd);
1516 static Instruction *CreateFree(Value *Source,
1517 ArrayRef<OperandBundleDef> Bundles,
1518 Instruction *InsertBefore);
1519 static Instruction *CreateFree(Value *Source,
1520 ArrayRef<OperandBundleDef> Bundles,
1521 BasicBlock *InsertAtEnd);
1522
1523 FunctionType *getFunctionType() const { return FTy; }
1524
1525 void mutateFunctionType(FunctionType *FTy) {
1526 mutateType(FTy->getReturnType());
1527 this->FTy = FTy;
1528 }
1529
1530 // Note that 'musttail' implies 'tail'.
1531 enum TailCallKind { TCK_None = 0, TCK_Tail = 1, TCK_MustTail = 2,
1532 TCK_NoTail = 3 };
1533 TailCallKind getTailCallKind() const {
1534 return TailCallKind(getSubclassDataFromInstruction() & 3);
1535 }
1536
1537 bool isTailCall() const {
1538 unsigned Kind = getSubclassDataFromInstruction() & 3;
1539 return Kind == TCK_Tail || Kind == TCK_MustTail;
1540 }
1541
1542 bool isMustTailCall() const {
1543 return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
1544 }
1545
1546 bool isNoTailCall() const {
1547 return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
1548 }
1549
1550 void setTailCall(bool isTC = true) {
1551 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
1552 unsigned(isTC ? TCK_Tail : TCK_None));
1553 }
1554
1555 void setTailCallKind(TailCallKind TCK) {
1556 setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
1557 unsigned(TCK));
1558 }
1559
1560 /// Provide fast operand accessors
1561 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1562
1563 /// Return the number of call arguments.
1564 ///
1565 unsigned getNumArgOperands() const {
1566 return getNumOperands() - getNumTotalBundleOperands() - 1;
1567 }
1568
1569 /// getArgOperand/setArgOperand - Return/set the i-th call argument.
1570 ///
1571 Value *getArgOperand(unsigned i) const {
1572 assert(i < getNumArgOperands() && "Out of bounds!");
1573 return getOperand(i);
1574 }
1575 void setArgOperand(unsigned i, Value *v) {
1576 assert(i < getNumArgOperands() && "Out of bounds!");
1577 setOperand(i, v);
1578 }
1579
1580 /// Return the iterator pointing to the beginning of the argument list.
1581 op_iterator arg_begin() { return op_begin(); }
1582
1583 /// Return the iterator pointing to the end of the argument list.
1584 op_iterator arg_end() {
1585 // [ call args ], [ operand bundles ], callee
1586 return op_end() - getNumTotalBundleOperands() - 1;
1587 }
1588
1589 /// Iteration adapter for range-for loops.
1590 iterator_range<op_iterator> arg_operands() {
1591 return make_range(arg_begin(), arg_end());
1592 }
1593
1594 /// Return the iterator pointing to the beginning of the argument list.
1595 const_op_iterator arg_begin() const { return op_begin(); }
1596
1597 /// Return the iterator pointing to the end of the argument list.
1598 const_op_iterator arg_end() const {
1599 // [ call args ], [ operand bundles ], callee
1600 return op_end() - getNumTotalBundleOperands() - 1;
1601 }
1602
1603 /// Iteration adapter for range-for loops.
1604 iterator_range<const_op_iterator> arg_operands() const {
1605 return make_range(arg_begin(), arg_end());
1606 }
1607
1608 /// Wrappers for getting the \c Use of a call argument.
1609 const Use &getArgOperandUse(unsigned i) const {
1610 assert(i < getNumArgOperands() && "Out of bounds!");
1611 return getOperandUse(i);
1612 }
1613 Use &getArgOperandUse(unsigned i) {
1614 assert(i < getNumArgOperands() && "Out of bounds!");
1615 return getOperandUse(i);
1616 }
1617
1618 /// If one of the arguments has the 'returned' attribute, return its
1619 /// operand value. Otherwise, return nullptr.
1620 Value *getReturnedArgOperand() const;
1621
1622 /// getCallingConv/setCallingConv - Get or set the calling convention of this
1623 /// function call.
1624 CallingConv::ID getCallingConv() const {
1625 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1626 }
1627 void setCallingConv(CallingConv::ID CC) {
1628 auto ID = static_cast<unsigned>(CC);
1629 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
1630 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1631 (ID << 2));
1632 }
1633
1634 /// Return the parameter attributes for this call.
1635 ///
1636 AttributeList getAttributes() const { return Attrs; }
1637
1638 /// Set the parameter attributes for this call.
1639 ///
1640 void setAttributes(AttributeList A) { Attrs = A; }
1641
1642 /// adds the attribute to the list of attributes.
1643 void addAttribute(unsigned i, Attribute::AttrKind Kind);
1644
1645 /// adds the attribute to the list of attributes.
1646 void addAttribute(unsigned i, Attribute Attr);
1647
1648 /// Adds the attribute to the indicated argument
1649 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
1650
1651 /// Adds the attribute to the indicated argument
1652 void addParamAttr(unsigned ArgNo, Attribute Attr);
1653
1654 /// removes the attribute from the list of attributes.
1655 void removeAttribute(unsigned i, Attribute::AttrKind Kind);
1656
1657 /// removes the attribute from the list of attributes.
1658 void removeAttribute(unsigned i, StringRef Kind);
1659
1660 /// Removes the attribute from the given argument
1661 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
1662
1663 /// Removes the attribute from the given argument
1664 void removeParamAttr(unsigned ArgNo, StringRef Kind);
1665
1666 /// adds the dereferenceable attribute to the list of attributes.
1667 void addDereferenceableAttr(unsigned i, uint64_t Bytes);
1668
1669 /// adds the dereferenceable_or_null attribute to the list of
1670 /// attributes.
1671 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
1672
1673 /// Determine whether this call has the given attribute.
1674 bool hasFnAttr(Attribute::AttrKind Kind) const {
1675 assert(Kind != Attribute::NoBuiltin &&
1676 "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
1677 return hasFnAttrImpl(Kind);
1678 }
1679
1680 /// Determine whether this call has the given attribute.
1681 bool hasFnAttr(StringRef Kind) const {
1682 return hasFnAttrImpl(Kind);
1683 }
1684
1685 /// Determine whether the return value has the given attribute.
1686 bool hasRetAttr(Attribute::AttrKind Kind) const;
1687
1688 /// Determine whether the argument or parameter has the given attribute.
1689 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1690
1691 /// Get the attribute of a given kind at a position.
1692 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1693 return getAttributes().getAttribute(i, Kind);
1694 }
1695
1696 /// Get the attribute of a given kind at a position.
1697 Attribute getAttribute(unsigned i, StringRef Kind) const {
1698 return getAttributes().getAttribute(i, Kind);
1699 }
1700
1701 /// Get the attribute of a given kind from a given arg
1702 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1703 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1704 return getAttributes().getParamAttr(ArgNo, Kind);
1705 }
1706
1707 /// Get the attribute of a given kind from a given arg
1708 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1709 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1710 return getAttributes().getParamAttr(ArgNo, Kind);
1711 }
1712
1713 /// Return true if the data operand at index \p i has the attribute \p
1714 /// A.
1715 ///
1716 /// Data operands include call arguments and values used in operand bundles,
1717 /// but do not include the callee operand. This routine dispatches to the
1718 /// underlying AttributeList or the OperandBundleUser as appropriate.
1719 ///
1720 /// The index \p i is interpreted as
1721 ///
1722 /// \p i == Attribute::ReturnIndex -> the return value
1723 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1724 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1725 /// (\p i - 1) in the operand list.
1726 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
1727
1728 /// Extract the alignment of the return value.
1729 unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
1730
1731 /// Extract the alignment for a call or parameter (0=unknown).
1732 unsigned getParamAlignment(unsigned ArgNo) const {
1733 return Attrs.getParamAlignment(ArgNo);
1734 }
1735
1736 /// Extract the number of dereferenceable bytes for a call or
1737 /// parameter (0=unknown).
1738 uint64_t getDereferenceableBytes(unsigned i) const {
1739 return Attrs.getDereferenceableBytes(i);
1740 }
1741
1742 /// Extract the number of dereferenceable_or_null bytes for a call or
1743 /// parameter (0=unknown).
1744 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1745 return Attrs.getDereferenceableOrNullBytes(i);
1746 }
1747
1748 /// @brief Determine if the return value is marked with the NoAlias attribute.
1749 bool returnDoesNotAlias() const {
1750 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1751 }
1752
1753 /// Return true if the call should not be treated as a call to a
1754 /// builtin.
1755 bool isNoBuiltin() const {
1756 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1757 !hasFnAttrImpl(Attribute::Builtin);
1758 }
1759
1760 /// Determine if the call requires strict floating point semantics.
1761 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1762
1763 /// Return true if the call should not be inlined.
1764 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1765 void setIsNoInline() {
1766 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1767 }
1768
1769 /// Return true if the call can return twice
1770 bool canReturnTwice() const {
1771 return hasFnAttr(Attribute::ReturnsTwice);
1772 }
1773 void setCanReturnTwice() {
1774 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1775 }
1776
1777 /// Determine if the call does not access memory.
1778 bool doesNotAccessMemory() const {
1779 return hasFnAttr(Attribute::ReadNone);
1780 }
1781 void setDoesNotAccessMemory() {
1782 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1783 }
1784
1785 /// Determine if the call does not access or only reads memory.
1786 bool onlyReadsMemory() const {
1787 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1788 }
1789 void setOnlyReadsMemory() {
1790 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1791 }
1792
1793 /// Determine if the call does not access or only writes memory.
1794 bool doesNotReadMemory() const {
1795 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1796 }
1797 void setDoesNotReadMemory() {
1798 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1799 }
1800
1801 /// @brief Determine if the call can access memory only using pointers based
1802 /// on its arguments.
1803 bool onlyAccessesArgMemory() const {
1804 return hasFnAttr(Attribute::ArgMemOnly);
1805 }
1806 void setOnlyAccessesArgMemory() {
1807 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1808 }
1809
1810 /// @brief Determine if the function may only access memory that is
1811 /// inaccessible from the IR.
1812 bool onlyAccessesInaccessibleMemory() const {
1813 return hasFnAttr(Attribute::InaccessibleMemOnly);
1814 }
1815 void setOnlyAccessesInaccessibleMemory() {
1816 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1817 }
1818
1819 /// @brief Determine if the function may only access memory that is
1820 /// either inaccessible from the IR or pointed to by its arguments.
1821 bool onlyAccessesInaccessibleMemOrArgMem() const {
1822 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1823 }
1824 void setOnlyAccessesInaccessibleMemOrArgMem() {
1825 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOrArgMemOnly);
1826 }
1827
1828 /// Determine if the call cannot return.
1829 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1830 void setDoesNotReturn() {
1831 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1832 }
1833
1834 /// Determine if the call cannot unwind.
1835 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1836 void setDoesNotThrow() {
1837 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1838 }
1839
1840 /// Determine if the call cannot be duplicated.
1841 bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
1842 void setCannotDuplicate() {
1843 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1844 }
1845
1846 /// Determine if the call is convergent
1847 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1848 void setConvergent() {
1849 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1850 }
1851 void setNotConvergent() {
1852 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1853 }
1854
1855 /// Determine if the call returns a structure through first
1856 /// pointer argument.
1857 bool hasStructRetAttr() const {
1858 if (getNumArgOperands() == 0)
1859 return false;
1860
1861 // Be friendly and also check the callee.
1862 return paramHasAttr(0, Attribute::StructRet);
1863 }
1864
1865 /// Determine if any call argument is an aggregate passed by value.
1866 bool hasByValArgument() const {
1867 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1868 }
1869
1870 /// Return the function called, or null if this is an
1871 /// indirect function invocation.
1872 ///
1873 Function *getCalledFunction() const {
1874 return dyn_cast<Function>(Op<-1>());
1875 }
1876
1877 /// Get a pointer to the function that is invoked by this
1878 /// instruction.
1879 const Value *getCalledValue() const { return Op<-1>(); }
1880 Value *getCalledValue() { return Op<-1>(); }
1881
1882 /// Set the function called.
1883 void setCalledFunction(Value* Fn) {
1884 setCalledFunction(
1885 cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
1886 Fn);
1887 }
1888 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1889 this->FTy = FTy;
1890 assert(FTy == cast<FunctionType>(
1891 cast<PointerType>(Fn->getType())->getElementType()));
1892 Op<-1>() = Fn;
1893 }
1894
1895 /// Check if this call is an inline asm statement.
1896 bool isInlineAsm() const {
1897 return isa<InlineAsm>(Op<-1>());
1898 }
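getCalledFunction(), getCalledValue() and isInlineAsm() are commonly combined to classify a call site. A sketch, under the usual caveat that getCalledFunction() also returns null for a callee hidden behind a bitcast, so "indirect" below really means "not a plain direct call":

#include "llvm/IR/Instructions.h"
using namespace llvm;

enum class CalleeKind { Direct, Indirect, Asm };

// Sketch of the usual three-way classification of a call site.
CalleeKind classifyCallee(const CallInst *CI) {
  if (CI->isInlineAsm())
    return CalleeKind::Asm;      // Op<-1>() is an InlineAsm value
  if (CI->getCalledFunction())
    return CalleeKind::Direct;   // dyn_cast<Function> on the callee succeeded
  return CalleeKind::Indirect;   // called through a pointer value
}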
1899
1900 // Methods for support type inquiry through isa, cast, and dyn_cast:
1901 static bool classof(const Instruction *I) {
1902 return I->getOpcode() == Instruction::Call;
1903 }
1904 static bool classof(const Value *V) {
1905 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1906 }
1907
1908private:
1909 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
1910 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
1911 return true;
1912
1913 // Operand bundles override attributes on the called function, but don't
1914 // override attributes directly present on the call instruction.
1915 if (isFnAttrDisallowedByOpBundle(Kind))
1916 return false;
1917
1918 if (const Function *F = getCalledFunction())
1919 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex,
1920 Kind);
1921 return false;
1922 }
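The lookup above gives call-site attributes priority, lets an operand bundle veto an attribute, and only then consults the callee. The practical consequence, sketched below, is that the call-level query and a callee-only query can disagree, which is why clients should prefer the former:

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: hasFnAttrImpl is why the call-level form on the left is preferred.
bool nounwindAtCallSite(const CallInst *CI) {
  return CI->doesNotThrow(); // call-site attrs + operand-bundle veto + callee
}
bool nounwindOnCalleeOnly(const CallInst *CI) {
  const Function *F = CI->getCalledFunction();
  return F && F->doesNotThrow(); // misses call-site attributes and bundles
}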
1923
1924 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1925 // method so that subclasses cannot accidentally use it.
1926 void setInstructionSubclassData(unsigned short D) {
1927 Instruction::setInstructionSubclassData(D);
1928 }
1929};
1930
1931template <>
1932struct OperandTraits<CallInst> : public VariadicOperandTraits<CallInst, 1> {
1933};
1934
1935CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
1936 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1937 BasicBlock *InsertAtEnd)
1938 : Instruction(
1939 cast<FunctionType>(cast<PointerType>(Func->getType())
1940 ->getElementType())->getReturnType(),
1941 Instruction::Call, OperandTraits<CallInst>::op_end(this) -
1942 (Args.size() + CountBundleInputs(Bundles) + 1),
1943 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
1944 init(Func, Args, Bundles, NameStr);
1945}
1946
1947CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1948 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1949 Instruction *InsertBefore)
1950 : Instruction(Ty->getReturnType(), Instruction::Call,
1951 OperandTraits<CallInst>::op_end(this) -
1952 (Args.size() + CountBundleInputs(Bundles) + 1),
1953 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1954 InsertBefore) {
1955 init(Ty, Func, Args, Bundles, NameStr);
1956}
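Both constructors reserve Args.size() + CountBundleInputs(Bundles) + 1 operand slots; the extra slot holds the callee, placed last so that Op<-1>() in the accessors above can reach it. A small consistency check a client could write (sketch only, helper name is hypothetical):

#include <cassert>
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: the operand list of a CallInst is laid out as
//   [ arguments..., operand-bundle inputs..., callee ]
// so the callee is always the trailing operand.
void checkCalleeIsLastOperand(const CallInst *CI) {
  assert(CI->getCalledValue() == CI->getOperand(CI->getNumOperands() - 1) &&
         "callee must be the trailing operand");
}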
1957
1958// Note: if you get compile errors about private methods then
1959// please update your code to use the high-level operand
1960// interfaces. See line 943 above.
1961DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
1962
1963//===----------------------------------------------------------------------===//
1964// SelectInst Class
1965//===----------------------------------------------------------------------===//
1966
1967/// This class represents the LLVM 'select' instruction.
1968///
1969class SelectInst : public Instruction {
1970 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1971 Instruction *InsertBefore)
1972 : Instruction(S1->getType(), Instruction::Select,
1973 &Op<0>(), 3, InsertBefore) {
1974 init(C, S1, S2);
1975 setName(NameStr);
1976 }
1977
1978 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1979 BasicBlock *InsertAtEnd)
1980 : Instruction(S1->getType(), Instruction::Select,
1981 &Op<0>(), 3, InsertAtEnd) {
1982 init(C, S1, S2);
1983 setName(NameStr);
1984 }
1985
1986 void init(Value *C, Value *S1, Value *S2) {
1987 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1988 Op<0>() = C;
1989 Op<1>() = S1;
1990 Op<2>() = S2;
1991 }
1992
1993protected:
1994 // Note: Instruction needs to be a friend here to call cloneImpl.
1995 friend class Instruction;
1996
1997 SelectInst *cloneImpl() const;
1998
1999public:
2000 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2001 const Twine &NameStr = "",
2002 Instruction *InsertBefore = nullptr,
2003 Instruction *MDFrom = nullptr) {
2004 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
2005 if (MDFrom)
2006 Sel->copyMetadata(*MDFrom);
2007 return Sel;
2008 }
2009
2010 static SelectInst *Create(Value *C, Value *S1, Value *S2,
2011 const Twine &NameStr,
2012 BasicBlock *InsertAtEnd) {
2013 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
2014 }
2015
2016 const Value *getCondition() const { return Op<0>(); }
2017 const Value *getTrueValue() const { return Op<1>(); }
2018 const Value *getFalseValue() const { return Op<2>(); }
2019 Value *getCondition() { return Op<0>(); }
2020 Value *getTrueValue() { return Op<1>(); }
2021 Value *getFalseValue() { return Op<2>(); }
2022
2023 void setCondition(Value *V) { Op<0>() = V; }
2024 void setTrueValue(Value *V) { Op<1>() = V; }
2025 void setFalseValue(Value *V) { Op<2>() = V; }
2026
2027 /// Return a string if the specified operands are invalid
2028 /// for a select operation, otherwise return null.
2029 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
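areInvalidOperands() performs the same check that init() asserts on, so callers can validate operands up front instead of relying on assertions being enabled. A sketch (emitSelect is a hypothetical helper, not part of this header):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

// Sketch: build `%r = select i1 %cond, T %a, T %b` in front of an existing
// instruction, rejecting ill-typed operands up front.
SelectInst *emitSelect(Value *Cond, Value *A, Value *B,
                       Instruction *InsertBefore) {
  if (const char *Err = SelectInst::areInvalidOperands(Cond, A, B))
    report_fatal_error(Err); // same condition init() would assert on
  return SelectInst::Create(Cond, A, B, "r", InsertBefore);
}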
2030
2031 /// Transparently provide more efficient getOperand methods.
2032 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2033
2034 OtherOps getOpcode() const {
2035 return static_cast<OtherOps>(Instruction::getOpcode());
2036 }
2037
2038 // Methods for support type inquiry through isa, cast, and dyn_cast:
2039 static bool classof(const Instruction *I) {
2040 return I->getOpcode() == Instruction::Select;
2041 }
2042 static bool classof(const Value *V) {
2043 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2044 }
2045};
2046
2047template <>
2048struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
2049};
2050
2051DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
2052
2053//===----------------------------------------------------------------------===//
2054// VAArgInst Class
2055//===----------------------------------------------------------------------===//
2056
2057/// This class represents the va_arg llvm instruction, which returns
2058/// an argument of the specified type given a va_list and increments that list