Bug Summary

File: llvm/lib/Transforms/Utils/InlineFunction.cpp
Warning: line 557, column 11
Forming reference to null pointer
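
Editor's note: the trace below shows a null default argument reaching a dereference. HandleInlinedLandingPad (step 9, source line 617) calls HandleCallsInBlockInlinedThroughInvoke without its optional third argument, so FuncletUnwindMap defaults to nullptr; when a call in the inlined block carries an OB_funclet operand bundle, line 557 evaluates *FuncletUnwindMap and forms a reference through the null pointer. A minimal, self-contained sketch of the same pattern (hypothetical names, not LLVM code):

    #include <map>

    using MemoMap = std::map<int, int>;

    // The map parameter defaults to nullptr, mirroring FuncletUnwindMap.
    static bool hasMemo(int Key, MemoMap *Memo = nullptr) {
      // Mirrors InlineFunction.cpp:557 -- the reference is formed on this
      // branch whether or not a map was actually supplied.
      MemoMap &M = *Memo; // undefined behavior when the default is used
      return M.count(Key) != 0;
    }

    int main() {
      return hasMemo(42) ? 1 : 0; // caller omits the map, like the flagged path
    }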

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InlineFunction.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Transforms/Utils -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-01-13-084841-49055-1 -x c++ /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/Utils/InlineFunction.cpp

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Transforms/Utils/InlineFunction.cpp

1//===- InlineFunction.cpp - Code to perform function inlining -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements inlining of a function into a call site, resolving
10// parameters and the return value as appropriate.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ADT/DenseMap.h"
15#include "llvm/ADT/None.h"
16#include "llvm/ADT/Optional.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/SetVector.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Analysis/AliasAnalysis.h"
24#include "llvm/Analysis/AssumptionCache.h"
25#include "llvm/Analysis/BlockFrequencyInfo.h"
26#include "llvm/Analysis/CallGraph.h"
27#include "llvm/Analysis/CaptureTracking.h"
28#include "llvm/Analysis/EHPersonalities.h"
29#include "llvm/Analysis/InstructionSimplify.h"
30#include "llvm/Analysis/ProfileSummaryInfo.h"
31#include "llvm/Transforms/Utils/Local.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/Analysis/VectorUtils.h"
34#include "llvm/IR/Argument.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/CFG.h"
37#include "llvm/IR/CallSite.h"
38#include "llvm/IR/Constant.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DIBuilder.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/DebugInfoMetadata.h"
43#include "llvm/IR/DebugLoc.h"
44#include "llvm/IR/DerivedTypes.h"
45#include "llvm/IR/Dominators.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/IRBuilder.h"
48#include "llvm/IR/InstrTypes.h"
49#include "llvm/IR/Instruction.h"
50#include "llvm/IR/Instructions.h"
51#include "llvm/IR/IntrinsicInst.h"
52#include "llvm/IR/Intrinsics.h"
53#include "llvm/IR/LLVMContext.h"
54#include "llvm/IR/MDBuilder.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Module.h"
57#include "llvm/IR/Type.h"
58#include "llvm/IR/User.h"
59#include "llvm/IR/Value.h"
60#include "llvm/Support/Casting.h"
61#include "llvm/Support/CommandLine.h"
62#include "llvm/Support/ErrorHandling.h"
63#include "llvm/Transforms/Utils/Cloning.h"
64#include "llvm/Transforms/Utils/ValueMapper.h"
65#include <algorithm>
66#include <cassert>
67#include <cstdint>
68#include <iterator>
69#include <limits>
70#include <string>
71#include <utility>
72#include <vector>
73
74using namespace llvm;
75using ProfileCount = Function::ProfileCount;
76
77static cl::opt<bool>
78EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
79 cl::Hidden,
80 cl::desc("Convert noalias attributes to metadata during inlining."));
81
82static cl::opt<bool>
83PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
84 cl::init(true), cl::Hidden,
85 cl::desc("Convert align attributes to assumptions during inlining."));
86
87llvm::InlineResult llvm::InlineFunction(CallBase *CB, InlineFunctionInfo &IFI,
88 AAResults *CalleeAAR,
89 bool InsertLifetime) {
90 return InlineFunction(CallSite(CB), IFI, CalleeAAR, InsertLifetime);
91}
92
93namespace {
94
95 /// A class for recording information about inlining a landing pad.
96 class LandingPadInliningInfo {
97 /// Destination of the invoke's unwind.
98 BasicBlock *OuterResumeDest;
99
100 /// Destination for the callee's resume.
101 BasicBlock *InnerResumeDest = nullptr;
102
103 /// LandingPadInst associated with the invoke.
104 LandingPadInst *CallerLPad = nullptr;
105
106 /// PHI for EH values from landingpad insts.
107 PHINode *InnerEHValuesPHI = nullptr;
108
109 SmallVector<Value*, 8> UnwindDestPHIValues;
110
111 public:
112 LandingPadInliningInfo(InvokeInst *II)
113 : OuterResumeDest(II->getUnwindDest()) {
114 // If there are PHI nodes in the unwind destination block, we need to keep
115 // track of which values came into them from the invoke before removing
116 // the edge from this block.
117 BasicBlock *InvokeBB = II->getParent();
118 BasicBlock::iterator I = OuterResumeDest->begin();
119 for (; isa<PHINode>(I); ++I) {
120 // Save the value to use for this edge.
121 PHINode *PHI = cast<PHINode>(I);
122 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
123 }
124
125 CallerLPad = cast<LandingPadInst>(I);
126 }
127
128 /// The outer unwind destination is the target of
129 /// unwind edges introduced for calls within the inlined function.
130 BasicBlock *getOuterResumeDest() const {
131 return OuterResumeDest;
132 }
133
134 BasicBlock *getInnerResumeDest();
135
136 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
137
138 /// Forward the 'resume' instruction to the caller's landing pad block.
139 /// When the landing pad block has only one predecessor, this is
140 /// a simple branch. When there is more than one predecessor, we need to
141 /// split the landing pad block after the landingpad instruction and jump
142 /// to there.
143 void forwardResume(ResumeInst *RI,
144 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
145
146 /// Add incoming-PHI values to the unwind destination block for the given
147 /// basic block, using the values for the original invoke's source block.
148 void addIncomingPHIValuesFor(BasicBlock *BB) const {
149 addIncomingPHIValuesForInto(BB, OuterResumeDest);
150 }
151
152 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
153 BasicBlock::iterator I = dest->begin();
154 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
155 PHINode *phi = cast<PHINode>(I);
156 phi->addIncoming(UnwindDestPHIValues[i], src);
157 }
158 }
159 };
160
161} // end anonymous namespace
162
163/// Get or create a target for the branch from ResumeInsts.
164BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
165 if (InnerResumeDest) return InnerResumeDest;
166
167 // Split the landing pad.
168 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
169 InnerResumeDest =
170 OuterResumeDest->splitBasicBlock(SplitPoint,
171 OuterResumeDest->getName() + ".body");
172
173 // The number of incoming edges we expect to the inner landing pad.
174 const unsigned PHICapacity = 2;
175
176 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
177 Instruction *InsertPoint = &InnerResumeDest->front();
178 BasicBlock::iterator I = OuterResumeDest->begin();
179 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
180 PHINode *OuterPHI = cast<PHINode>(I);
181 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
182 OuterPHI->getName() + ".lpad-body",
183 InsertPoint);
184 OuterPHI->replaceAllUsesWith(InnerPHI);
185 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
186 }
187
188 // Create a PHI for the exception values.
189 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
190 "eh.lpad-body", InsertPoint);
191 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
192 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
193
194 // All done.
195 return InnerResumeDest;
196}
197
198/// Forward the 'resume' instruction to the caller's landing pad block.
199/// When the landing pad block has only one predecessor, this is a simple
200/// branch. When there is more than one predecessor, we need to split the
201/// landing pad block after the landingpad instruction and jump to there.
202void LandingPadInliningInfo::forwardResume(
203 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
204 BasicBlock *Dest = getInnerResumeDest();
205 BasicBlock *Src = RI->getParent();
206
207 BranchInst::Create(Dest, Src);
208
209 // Update the PHIs in the destination. They were inserted in an order which
210 // makes this work.
211 addIncomingPHIValuesForInto(Src, Dest);
212
213 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
214 RI->eraseFromParent();
215}
216
217/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
218static Value *getParentPad(Value *EHPad) {
219 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
220 return FPI->getParentPad();
221 return cast<CatchSwitchInst>(EHPad)->getParentPad();
222}
223
224using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
225
226/// Helper for getUnwindDestToken that does the descendant-ward part of
227/// the search.
228static Value *getUnwindDestTokenHelper(Instruction *EHPad,
229 UnwindDestMemoTy &MemoMap) {
230 SmallVector<Instruction *, 8> Worklist(1, EHPad);
231
232 while (!Worklist.empty()) {
233 Instruction *CurrentPad = Worklist.pop_back_val();
234 // We only put pads on the worklist that aren't in the MemoMap. When
235 // we find an unwind dest for a pad we may update its ancestors, but
236 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
237 // so they should never get updated while queued on the worklist.
238 assert(!MemoMap.count(CurrentPad));
239 Value *UnwindDestToken = nullptr;
240 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
241 if (CatchSwitch->hasUnwindDest()) {
242 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
243 } else {
244 // Catchswitch doesn't have a 'nounwind' variant, and one might be
245 // annotated as "unwinds to caller" when really it's nounwind (see
246 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
247 // parent's unwind dest from this. We can check its catchpads'
248 // descendants, since they might include a cleanuppad with an
249 // "unwinds to caller" cleanupret, which can be trusted.
250 for (auto HI = CatchSwitch->handler_begin(),
251 HE = CatchSwitch->handler_end();
252 HI != HE && !UnwindDestToken; ++HI) {
253 BasicBlock *HandlerBlock = *HI;
254 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
255 for (User *Child : CatchPad->users()) {
256 // Intentionally ignore invokes here -- since the catchswitch is
257 // marked "unwind to caller", it would be a verifier error if it
258 // contained an invoke which unwinds out of it, so any invoke we'd
259 // encounter must unwind to some child of the catch.
260 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
261 continue;
262
263 Instruction *ChildPad = cast<Instruction>(Child);
264 auto Memo = MemoMap.find(ChildPad);
265 if (Memo == MemoMap.end()) {
266 // Haven't figured out this child pad yet; queue it.
267 Worklist.push_back(ChildPad);
268 continue;
269 }
270 // We've already checked this child, but might have found that
271 // it offers no proof either way.
272 Value *ChildUnwindDestToken = Memo->second;
273 if (!ChildUnwindDestToken)
274 continue;
275 // We already know the child's unwind dest, which can either
276 // be ConstantTokenNone to indicate unwind to caller, or can
277 // be another child of the catchpad. Only the former indicates
278 // the unwind dest of the catchswitch.
279 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
280 UnwindDestToken = ChildUnwindDestToken;
281 break;
282 }
283 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
284 }
285 }
286 }
287 } else {
288 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
289 for (User *U : CleanupPad->users()) {
290 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
291 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
292 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
293 else
294 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
295 break;
296 }
297 Value *ChildUnwindDestToken;
298 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
299 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
300 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
301 Instruction *ChildPad = cast<Instruction>(U);
302 auto Memo = MemoMap.find(ChildPad);
303 if (Memo == MemoMap.end()) {
304 // Haven't resolved this child yet; queue it and keep searching.
305 Worklist.push_back(ChildPad);
306 continue;
307 }
308 // We've checked this child, but still need to ignore it if it
309 // had no proof either way.
310 ChildUnwindDestToken = Memo->second;
311 if (!ChildUnwindDestToken)
312 continue;
313 } else {
314 // Not a relevant user of the cleanuppad
315 continue;
316 }
317 // In a well-formed program, the child/invoke must either unwind to
318 // an(other) child of the cleanup, or exit the cleanup. In the
319 // first case, continue searching.
320 if (isa<Instruction>(ChildUnwindDestToken) &&
321 getParentPad(ChildUnwindDestToken) == CleanupPad)
322 continue;
323 UnwindDestToken = ChildUnwindDestToken;
324 break;
325 }
326 }
327 // If we haven't found an unwind dest for CurrentPad, we may have queued its
328 // children, so move on to the next in the worklist.
329 if (!UnwindDestToken)
330 continue;
331
332 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
333 // any ancestors of CurrentPad up to but not including UnwindDestToken's
334 // parent pad. Record this in the memo map, and check to see if the
335 // original EHPad being queried is one of the ones exited.
336 Value *UnwindParent;
337 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
338 UnwindParent = getParentPad(UnwindPad);
339 else
340 UnwindParent = nullptr;
341 bool ExitedOriginalPad = false;
342 for (Instruction *ExitedPad = CurrentPad;
343 ExitedPad && ExitedPad != UnwindParent;
344 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
345 // Skip over catchpads since they just follow their catchswitches.
346 if (isa<CatchPadInst>(ExitedPad))
347 continue;
348 MemoMap[ExitedPad] = UnwindDestToken;
349 ExitedOriginalPad |= (ExitedPad == EHPad);
350 }
351
352 if (ExitedOriginalPad)
353 return UnwindDestToken;
354
355 // Continue the search.
356 }
357
358 // No definitive information is contained within this funclet.
359 return nullptr;
360}
361
362/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
363/// return that pad instruction. If it unwinds to caller, return
364/// ConstantTokenNone. If it does not have a definitive unwind destination,
365/// return nullptr.
366///
367/// This routine gets invoked for calls in funclets in inlinees when inlining
368/// an invoke. Since many funclets don't have calls inside them, it's queried
369/// on-demand rather than building a map of pads to unwind dests up front.
370/// Determining a funclet's unwind dest may require recursively searching its
371/// descendants, and also ancestors and cousins if the descendants don't provide
372/// an answer. Since most funclets will have their unwind dest immediately
373/// available as the unwind dest of a catchswitch or cleanupret, this routine
374/// searches top-down from the given pad and then up. To avoid worst-case
375/// quadratic run-time given that approach, it uses a memo map to avoid
376/// re-processing funclet trees. The callers that rewrite the IR as they go
377/// take advantage of this, for correctness, by checking/forcing rewritten
378/// pads' entries to match the original callee view.
379static Value *getUnwindDestToken(Instruction *EHPad,
380 UnwindDestMemoTy &MemoMap) {
381 // Catchpads unwind to the same place as their catchswitch;
382// redirect any queries on catchpads so the code below can
383 // deal with just catchswitches and cleanuppads.
384 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
385 EHPad = CPI->getCatchSwitch();
386
387 // Check if we've already determined the unwind dest for this pad.
388 auto Memo = MemoMap.find(EHPad);
389 if (Memo != MemoMap.end())
390 return Memo->second;
391
392 // Search EHPad and, if necessary, its descendants.
393 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
394 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
395 if (UnwindDestToken)
396 return UnwindDestToken;
397
398 // No information is available for this EHPad from itself or any of its
399 // descendants. An unwind all the way out to a pad in the caller would
400 // need also to agree with the unwind dest of the parent funclet, so
401 // search up the chain to try to find a funclet with information. Put
402 // null entries in the memo map to avoid re-processing as we go up.
403 MemoMap[EHPad] = nullptr;
404#ifndef NDEBUG
405 SmallPtrSet<Instruction *, 4> TempMemos;
406 TempMemos.insert(EHPad);
407#endif
408 Instruction *LastUselessPad = EHPad;
409 Value *AncestorToken;
410 for (AncestorToken = getParentPad(EHPad);
411 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
412 AncestorToken = getParentPad(AncestorToken)) {
413 // Skip over catchpads since they just follow their catchswitches.
414 if (isa<CatchPadInst>(AncestorPad))
415 continue;
416 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
417 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
418 // call to getUnwindDestToken, that would mean that AncestorPad had no
419 // information in itself, its descendants, or its ancestors. If that
420 // were the case, then we should also have recorded the lack of information
421 // for the descendant that we're coming from. So assert that we don't
422 // find a null entry in the MemoMap for AncestorPad.
423 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
424 auto AncestorMemo = MemoMap.find(AncestorPad);
425 if (AncestorMemo == MemoMap.end()) {
426 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
427 } else {
428 UnwindDestToken = AncestorMemo->second;
429 }
430 if (UnwindDestToken)
431 break;
432 LastUselessPad = AncestorPad;
433 MemoMap[LastUselessPad] = nullptr;
434#ifndef NDEBUG
435 TempMemos.insert(LastUselessPad);
436#endif
437 }
438
439 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
440 // returned nullptr (and likewise for EHPad and any of its ancestors up to
441 // LastUselessPad), so LastUselessPad has no information from below. Since
442 // getUnwindDestTokenHelper must investigate all downward paths through
443 // no-information nodes to prove that a node has no information like this,
444 // and since any time it finds information it records it in the MemoMap for
445 // not just the immediately-containing funclet but also any ancestors also
446 // exited, it must be the case that, walking downward from LastUselessPad,
447 // visiting just those nodes which have not been mapped to an unwind dest
448 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
449 // they are just used to keep getUnwindDestTokenHelper from repeating work),
450 // any node visited must have been exhaustively searched with no information
451 // for it found.
452 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
453 while (!Worklist.empty()) {
454 Instruction *UselessPad = Worklist.pop_back_val();
455 auto Memo = MemoMap.find(UselessPad);
456 if (Memo != MemoMap.end() && Memo->second) {
457 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
458 // that it is a funclet that does have information about unwinding to
459 // a particular destination; its parent was a useless pad.
460 // Since its parent has no information, the unwind edge must not escape
461 // the parent, and must target a sibling of this pad. This local unwind
462 // gives us no information about EHPad. Leave it and the subtree rooted
463 // at it alone.
464 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
465 continue;
466 }
467 // We know we don't have information for UselessPad. If it has an entry in
468 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
469 // added on this invocation of getUnwindDestToken; if a previous invocation
470 // recorded nullptr, it would have had to prove that the ancestors of
471 // UselessPad, which include LastUselessPad, had no information, and that
472 // in turn would have required proving that the descendants of
473 // LastUselessPad, which include EHPad, have no information about
474 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
475 // the MemoMap on that invocation, which isn't the case if we got here.
476 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
477 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
478 // information that we'd be contradicting by making a map entry for it
479 // (which is something that getUnwindDestTokenHelper must have proved for
480 // us to get here). Just assert on its direct users here; the checks in
481 // this downward walk at its descendants will verify that they don't have
482 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
483 // unwind edges or unwind to a sibling).
484 MemoMap[UselessPad] = UnwindDestToken;
485 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
486 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
487 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
488 auto *CatchPad = HandlerBlock->getFirstNonPHI();
489 for (User *U : CatchPad->users()) {
490 assert(
491 (!isa<InvokeInst>(U) ||
492 (getParentPad(
493 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
494 CatchPad)) &&
495 "Expected useless pad");
496 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
497 Worklist.push_back(cast<Instruction>(U));
498 }
499 }
500 } else {
501 assert(isa<CleanupPadInst>(UselessPad));
502 for (User *U : UselessPad->users()) {
503 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
504 assert((!isa<InvokeInst>(U) ||
505 (getParentPad(
506 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
507 UselessPad)) &&
508 "Expected useless pad");
509 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
510 Worklist.push_back(cast<Instruction>(U));
511 }
512 }
513 }
514
515 return UnwindDestToken;
516}
517
518/// When we inline a basic block into an invoke,
519/// we have to turn all of the calls that can throw into invokes.
520/// This function analyzes BB to see if there are any calls, and if so,
521/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
522/// nodes in that block with the values specified in InvokeDestPHIValues.
523static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
524 BasicBlock *BB, BasicBlock *UnwindEdge,
525 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
526 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
11. Calling 'operator!='
14. Returning from 'operator!='
15. Loop condition is true. Entering loop body
527 Instruction *I = &*BBI++;
528
529 // We only need to check for function calls: inlined invoke
530 // instructions require no special handling.
531 CallInst *CI = dyn_cast<CallInst>(I);
16. Assuming 'I' is a 'CallInst'
532
533 if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
16.1. 'CI' is non-null
17. Assuming the condition is false
18. Assuming the object is not a 'InlineAsm'
19. Taking false branch
534 continue;
535
536 // We do not need to (and in fact, cannot) convert possibly throwing calls
537 // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
538 // invokes. The caller's "segment" of the deoptimization continuation
539 // attached to the newly inlined @llvm.experimental_deoptimize
540 // (resp. @llvm.experimental.guard) call should contain the exception
541 // handling logic, if any.
542 if (auto *F = CI->getCalledFunction())
20. Calling 'CallBase::getCalledFunction'
23. Returning from 'CallBase::getCalledFunction'
23.1. 'F' is null
24. Taking false branch
543 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
544 F->getIntrinsicID() == Intrinsic::experimental_guard)
545 continue;
546
547 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
25. Assuming the condition is true
26. Taking true branch
548 // This call is nested inside a funclet. If that funclet has an unwind
549 // destination within the inlinee, then unwinding out of this call would
550 // be UB. Rewriting this call to an invoke which targets the inlined
551 // invoke's unwind dest would give the call's parent funclet multiple
552 // unwind destinations, which is something that subsequent EH table
553 // generation can't handle and that the verifier rejects. So when we
554 // see such a call, leave it as a call.
555 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
556 Value *UnwindDestToken =
557 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
27. Forming reference to null pointer
558 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
559 continue;
560#ifndef NDEBUG
561 Instruction *MemoKey;
562 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
563 MemoKey = CatchPad->getCatchSwitch();
564 else
565 MemoKey = FuncletPad;
566 assert(FuncletUnwindMap->count(MemoKey) &&
567 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
568 "must get memoized to avoid confusing later searches");
569#endif // NDEBUG
570 }
571
572 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
573 return BB;
574 }
575 return nullptr;
576}
577
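Editor's note: on the reported path, FuncletUnwindMap is null inside the function above because the landing-pad caller relies on the defaulted parameter (see line 617 below). One conceivable hardening, offered only as a hypothetical sketch rather than the upstream fix, is to make the optional map explicit before line 557 forms the reference:

    // Hypothetical guard: treat a missing map as "no unwind-dest information".
    Value *UnwindDestToken = nullptr;
    if (FuncletUnwindMap)
      UnwindDestToken = getUnwindDestToken(FuncletPad, *FuncletUnwindMap);

In practice, calls reached through the landingpad-style path are not expected to carry funclet operand bundles (funclets belong to Windows EH personalities), which may be why the nullptr default is considered safe; the analyzer cannot see that invariant.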
578/// If we inlined an invoke site, we need to convert calls
579/// in the body of the inlined function into invokes.
580///
581/// II is the invoke instruction being inlined. FirstNewBlock is the first
582/// block of the inlined code (the last block is the end of the function),
583/// and InlineCodeInfo is information about the code that got inlined.
584static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
585 ClonedCodeInfo &InlinedCodeInfo) {
586 BasicBlock *InvokeDest = II->getUnwindDest();
587
588 Function *Caller = FirstNewBlock->getParent();
589
590 // The inlined code is currently at the end of the function, scan from the
591 // start of the inlined code to its end, checking for stuff we need to
592 // rewrite.
593 LandingPadInliningInfo Invoke(II);
594
595 // Get all of the inlined landing pad instructions.
596 SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
597 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
1. Loop condition is false. Execution continues on line 604
598 I != E; ++I)
599 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
600 InlinedLPads.insert(II->getLandingPadInst());
601
602 // Append the clauses from the outer landing pad instruction into the inlined
603 // landing pad instructions.
604 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
605 for (LandingPadInst *InlinedLPad : InlinedLPads) {
606 unsigned OuterNum = OuterLPad->getNumClauses();
607 InlinedLPad->reserveClauses(OuterNum);
608 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
609 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
610 if (OuterLPad->isCleanup())
611 InlinedLPad->setCleanup(true);
612 }
613
614 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
615 BB != E; ++BB) {
2. Calling 'operator!='
5. Returning from 'operator!='
6. Loop condition is true. Entering loop body
616 if (InlinedCodeInfo.ContainsCalls)
7. Assuming field 'ContainsCalls' is true
8. Taking true branch
617 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
9. Passing null pointer value via 3rd parameter 'FuncletUnwindMap'
10. Calling 'HandleCallsInBlockInlinedThroughInvoke'
618 &*BB, Invoke.getOuterResumeDest()))
619 // Update any PHI nodes in the exceptional block to indicate that there
620 // is now a new entry in them.
621 Invoke.addIncomingPHIValuesFor(NewBB);
622
623 // Forward any resumes that are remaining here.
624 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
625 Invoke.forwardResume(RI, InlinedLPads);
626 }
627
628 // Now that everything is happy, we have one final detail. The PHI nodes in
629 // the exception destination block still have entries due to the original
630 // invoke instruction. Eliminate these entries (which might even delete the
631 // PHI node) now.
632 InvokeDest->removePredecessor(II->getParent());
633}
634
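Editor's note: this is the function that triggers the report; its call at line 617 omits the third argument, whereas HandleInlinedEHPad below passes &FuncletUnwindMap at line 755. A hypothetical alternative that would avoid the null default by construction (an assumption for illustration, not the upstream patch) is to thread a local memo map through this path as well:

    // Hypothetical variant of the call at line 617.
    UnwindDestMemoTy FuncletUnwindMap;
    if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
            &*BB, Invoke.getOuterResumeDest(), &FuncletUnwindMap))
      Invoke.addIncomingPHIValuesFor(NewBB);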
635/// If we inlined an invoke site, we need to convert calls
636/// in the body of the inlined function into invokes.
637///
638/// II is the invoke instruction being inlined. FirstNewBlock is the first
639/// block of the inlined code (the last block is the end of the function),
640/// and InlineCodeInfo is information about the code that got inlined.
641static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
642 ClonedCodeInfo &InlinedCodeInfo) {
643 BasicBlock *UnwindDest = II->getUnwindDest();
644 Function *Caller = FirstNewBlock->getParent();
645
646 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
647
648 // If there are PHI nodes in the unwind destination block, we need to keep
649 // track of which values came into them from the invoke before removing the
650 // edge from this block.
651 SmallVector<Value *, 8> UnwindDestPHIValues;
652 BasicBlock *InvokeBB = II->getParent();
653 for (Instruction &I : *UnwindDest) {
654 // Save the value to use for this edge.
655 PHINode *PHI = dyn_cast<PHINode>(&I);
656 if (!PHI)
657 break;
658 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
659 }
660
661 // Add incoming-PHI values to the unwind destination block for the given basic
662 // block, using the values for the original invoke's source block.
663 auto UpdatePHINodes = [&](BasicBlock *Src) {
664 BasicBlock::iterator I = UnwindDest->begin();
665 for (Value *V : UnwindDestPHIValues) {
666 PHINode *PHI = cast<PHINode>(I);
667 PHI->addIncoming(V, Src);
668 ++I;
669 }
670 };
671
672 // This connects all the instructions which 'unwind to caller' to the invoke
673 // destination.
674 UnwindDestMemoTy FuncletUnwindMap;
675 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
676 BB != E; ++BB) {
677 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
678 if (CRI->unwindsToCaller()) {
679 auto *CleanupPad = CRI->getCleanupPad();
680 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
681 CRI->eraseFromParent();
682 UpdatePHINodes(&*BB);
683 // Finding a cleanupret with an unwind destination would confuse
684 // subsequent calls to getUnwindDestToken, so map the cleanuppad
685 // to short-circuit any such calls and recognize this as an "unwind
686 // to caller" cleanup.
687 assert(!FuncletUnwindMap.count(CleanupPad) ||
688 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
689 FuncletUnwindMap[CleanupPad] =
690 ConstantTokenNone::get(Caller->getContext());
691 }
692 }
693
694 Instruction *I = BB->getFirstNonPHI();
695 if (!I->isEHPad())
696 continue;
697
698 Instruction *Replacement = nullptr;
699 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
700 if (CatchSwitch->unwindsToCaller()) {
701 Value *UnwindDestToken;
702 if (auto *ParentPad =
703 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
704 // This catchswitch is nested inside another funclet. If that
705 // funclet has an unwind destination within the inlinee, then
706 // unwinding out of this catchswitch would be UB. Rewriting this
707 // catchswitch to unwind to the inlined invoke's unwind dest would
708 // give the parent funclet multiple unwind destinations, which is
709 // something that subsequent EH table generation can't handle and
710 // that the verifier rejects. So when we see such a call, leave it
711 // as "unwind to caller".
712 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
713 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
714 continue;
715 } else {
716 // This catchswitch has no parent to inherit constraints from, and
717 // none of its descendants can have an unwind edge that exits it and
718 // targets another funclet in the inlinee. It may or may not have a
719 // descendant that definitively has an unwind to caller. In either
720 // case, we'll have to assume that any unwinds out of it may need to
721 // be routed to the caller, so treat it as though it has a definitive
722 // unwind to caller.
723 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
724 }
725 auto *NewCatchSwitch = CatchSwitchInst::Create(
726 CatchSwitch->getParentPad(), UnwindDest,
727 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
728 CatchSwitch);
729 for (BasicBlock *PadBB : CatchSwitch->handlers())
730 NewCatchSwitch->addHandler(PadBB);
731 // Propagate info for the old catchswitch over to the new one in
732 // the unwind map. This also serves to short-circuit any subsequent
733 // checks for the unwind dest of this catchswitch, which would get
734 // confused if they found the outer handler in the callee.
735 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
736 Replacement = NewCatchSwitch;
737 }
738 } else if (!isa<FuncletPadInst>(I)) {
739 llvm_unreachable("unexpected EHPad!");
740 }
741
742 if (Replacement) {
743 Replacement->takeName(I);
744 I->replaceAllUsesWith(Replacement);
745 I->eraseFromParent();
746 UpdatePHINodes(&*BB);
747 }
748 }
749
750 if (InlinedCodeInfo.ContainsCalls)
751 for (Function::iterator BB = FirstNewBlock->getIterator(),
752 E = Caller->end();
753 BB != E; ++BB)
754 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
755 &*BB, UnwindDest, &FuncletUnwindMap))
756 // Update any PHI nodes in the exceptional block to indicate that there
757 // is now a new entry in them.
758 UpdatePHINodes(NewBB);
759
760 // Now that everything is happy, we have one final detail. The PHI nodes in
761 // the exception destination block still have entries due to the original
762 // invoke instruction. Eliminate these entries (which might even delete the
763 // PHI node) now.
764 UnwindDest->removePredecessor(InvokeBB);
765}
766
767/// When inlining a call site that has !llvm.mem.parallel_loop_access or
768/// llvm.access.group metadata, that metadata should be propagated to all
769/// memory-accessing cloned instructions.
770static void PropagateParallelLoopAccessMetadata(CallSite CS,
771 ValueToValueMapTy &VMap) {
772 MDNode *M =
773 CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
774 MDNode *CallAccessGroup =
775 CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
776 if (!M && !CallAccessGroup)
777 return;
778
779 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
780 VMI != VMIE; ++VMI) {
781 if (!VMI->second)
782 continue;
783
784 Instruction *NI = dyn_cast<Instruction>(VMI->second);
785 if (!NI)
786 continue;
787
788 if (M) {
789 if (MDNode *PM =
790 NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
791 M = MDNode::concatenate(PM, M);
792 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
793 } else if (NI->mayReadOrWriteMemory()) {
794 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
795 }
796 }
797
798 if (NI->mayReadOrWriteMemory()) {
799 MDNode *UnitedAccGroups = uniteAccessGroups(
800 NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
801 NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
802 }
803 }
804}
805
806/// When inlining a function that contains noalias scope metadata,
807/// this metadata needs to be cloned so that the inlined blocks
808/// have different "unique scopes" at every call site. Were this not done, then
809/// aliasing scopes from a function inlined into a caller multiple times could
810/// not be differentiated (and this would lead to miscompiles because the
811/// non-aliasing property communicated by the metadata could have
812/// call-site-specific control dependencies).
813static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
814 const Function *CalledFunc = CS.getCalledFunction();
815 SetVector<const MDNode *> MD;
816
817 // Note: We could only clone the metadata if it is already used in the
818 // caller. I'm omitting that check here because it might confuse
819 // inter-procedural alias analysis passes. We can revisit this if it becomes
820 // an efficiency or overhead problem.
821
822 for (const BasicBlock &I : *CalledFunc)
823 for (const Instruction &J : I) {
824 if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
825 MD.insert(M);
826 if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
827 MD.insert(M);
828 }
829
830 if (MD.empty())
831 return;
832
833 // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
834 // the set.
835 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
836 while (!Queue.empty()) {
837 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
838 for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
839 if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
840 if (MD.insert(M1))
841 Queue.push_back(M1);
842 }
843
844 // Now we have a complete set of all metadata in the chains used to specify
845 // the noalias scopes and the lists of those scopes.
846 SmallVector<TempMDTuple, 16> DummyNodes;
847 DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
848 for (const MDNode *I : MD) {
849 DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
850 MDMap[I].reset(DummyNodes.back().get());
851 }
852
853 // Create new metadata nodes to replace the dummy nodes, replacing old
854 // metadata references with either a dummy node or an already-created new
855 // node.
856 for (const MDNode *I : MD) {
857 SmallVector<Metadata *, 4> NewOps;
858 for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
859 const Metadata *V = I->getOperand(i);
860 if (const MDNode *M = dyn_cast<MDNode>(V))
861 NewOps.push_back(MDMap[M]);
862 else
863 NewOps.push_back(const_cast<Metadata *>(V));
864 }
865
866 MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
867 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
868 assert(TempM->isTemporary() && "Expected temporary node");
869
870 TempM->replaceAllUsesWith(NewM);
871 }
872
873 // Now replace the metadata in the new inlined instructions with the
874 // replacements from the map.
875 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
876 VMI != VMIE; ++VMI) {
877 if (!VMI->second)
878 continue;
879
880 Instruction *NI = dyn_cast<Instruction>(VMI->second);
881 if (!NI)
882 continue;
883
884 if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
885 MDNode *NewMD = MDMap[M];
886 // If the call site also had alias scope metadata (a list of scopes to
887 // which instructions inside it might belong), propagate those scopes to
888 // the inlined instructions.
889 if (MDNode *CSM =
890 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
891 NewMD = MDNode::concatenate(NewMD, CSM);
892 NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
893 } else if (NI->mayReadOrWriteMemory()) {
894 if (MDNode *M =
895 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
896 NI->setMetadata(LLVMContext::MD_alias_scope, M);
897 }
898
899 if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
900 MDNode *NewMD = MDMap[M];
901 // If the call site also had noalias metadata (a list of scopes with
902 // which instructions inside it don't alias), propagate those scopes to
903 // the inlined instructions.
904 if (MDNode *CSM =
905 CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
906 NewMD = MDNode::concatenate(NewMD, CSM);
907 NI->setMetadata(LLVMContext::MD_noalias, NewMD);
908 } else if (NI->mayReadOrWriteMemory()) {
909 if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
910 NI->setMetadata(LLVMContext::MD_noalias, M);
911 }
912 }
913}
914
915/// If the inlined function has noalias arguments,
916/// then add new alias scopes for each noalias argument, tag the mapped noalias
917/// parameters with noalias metadata specifying the new scope, and tag all
918/// non-derived loads, stores and memory intrinsics with the new alias scopes.
919static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
920 const DataLayout &DL, AAResults *CalleeAAR) {
921 if (!EnableNoAliasConversion)
922 return;
923
924 const Function *CalledFunc = CS.getCalledFunction();
925 SmallVector<const Argument *, 4> NoAliasArgs;
926
927 for (const Argument &Arg : CalledFunc->args())
928 if (Arg.hasNoAliasAttr() && !Arg.use_empty())
929 NoAliasArgs.push_back(&Arg);
930
931 if (NoAliasArgs.empty())
932 return;
933
934 // To do a good job, if a noalias variable is captured, we need to know if
935 // the capture point dominates the particular use we're considering.
936 DominatorTree DT;
937 DT.recalculate(const_cast<Function&>(*CalledFunc));
938
939 // noalias indicates that pointer values based on the argument do not alias
940 // pointer values which are not based on it. So we add a new "scope" for each
941 // noalias function argument. Accesses using pointers based on that argument
942 // become part of that alias scope, accesses using pointers not based on that
943 // argument are tagged as noalias with that scope.
944
945 DenseMap<const Argument *, MDNode *> NewScopes;
946 MDBuilder MDB(CalledFunc->getContext());
947
948 // Create a new scope domain for this function.
949 MDNode *NewDomain =
950 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
951 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
952 const Argument *A = NoAliasArgs[i];
953
954 std::string Name = CalledFunc->getName();
955 if (A->hasName()) {
956 Name += ": %";
957 Name += A->getName();
958 } else {
959 Name += ": argument ";
960 Name += utostr(i);
961 }
962
963 // Note: We always create a new anonymous root here. This is true regardless
964 // of the linkage of the callee because the aliasing "scope" is not just a
965 // property of the callee, but also all control dependencies in the caller.
966 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
967 NewScopes.insert(std::make_pair(A, NewScope));
968 }
969
970 // Iterate over all new instructions in the map; for all memory-access
971 // instructions, add the alias scope metadata.
972 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
973 VMI != VMIE; ++VMI) {
974 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
975 if (!VMI->second)
976 continue;
977
978 Instruction *NI = dyn_cast<Instruction>(VMI->second);
979 if (!NI)
980 continue;
981
982 bool IsArgMemOnlyCall = false, IsFuncCall = false;
983 SmallVector<const Value *, 2> PtrArgs;
984
985 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
986 PtrArgs.push_back(LI->getPointerOperand());
987 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
988 PtrArgs.push_back(SI->getPointerOperand());
989 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
990 PtrArgs.push_back(VAAI->getPointerOperand());
991 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
992 PtrArgs.push_back(CXI->getPointerOperand());
993 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
994 PtrArgs.push_back(RMWI->getPointerOperand());
995 else if (const auto *Call = dyn_cast<CallBase>(I)) {
996 // If we know that the call does not access memory, then we'll still
997 // know that about the inlined clone of this call site, and we don't
998 // need to add metadata.
999 if (Call->doesNotAccessMemory())
1000 continue;
1001
1002 IsFuncCall = true;
1003 if (CalleeAAR) {
1004 FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1005 if (MRB == FMRB_OnlyAccessesArgumentPointees ||
1006 MRB == FMRB_OnlyReadsArgumentPointees)
1007 IsArgMemOnlyCall = true;
1008 }
1009
1010 for (Value *Arg : Call->args()) {
1011 // We need to check the underlying objects of all arguments, not just
1012 // the pointer arguments, because we might be passing pointers as
1013 // integers, etc.
1014 // However, if we know that the call only accesses pointer arguments,
1015 // then we only need to check the pointer arguments.
1016 if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1017 continue;
1018
1019 PtrArgs.push_back(Arg);
1020 }
1021 }
1022
1023 // If we found no pointers, then this instruction is not suitable for
1024 // pairing with an instruction to receive aliasing metadata.
1025 // However, if this is a call, we might just alias with none of the
1026 // noalias arguments.
1027 if (PtrArgs.empty() && !IsFuncCall)
1028 continue;
1029
1030 // It is possible that there is only one underlying object, but it
1031 // might be reachable only through several PHIs, and thus it could be
1032 // repeated in the Objects list.
1033 SmallPtrSet<const Value *, 4> ObjSet;
1034 SmallVector<Metadata *, 4> Scopes, NoAliases;
1035
1036 SmallSetVector<const Argument *, 4> NAPtrArgs;
1037 for (const Value *V : PtrArgs) {
1038 SmallVector<const Value *, 4> Objects;
1039 GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
1040
1041 for (const Value *O : Objects)
1042 ObjSet.insert(O);
1043 }
1044
1045 // Figure out if we're derived from anything that is not a noalias
1046 // argument.
1047 bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1048 for (const Value *V : ObjSet) {
1049 // Is this value a constant that cannot be derived from any pointer
1050 // value? (We need to exclude constant expressions, for example, that
1051 // are formed from arithmetic on global symbols.)
1052 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1053 isa<ConstantPointerNull>(V) ||
1054 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1055 if (IsNonPtrConst)
1056 continue;
1057
1058 // If this is anything other than a noalias argument, then we cannot
1059 // completely describe the aliasing properties using alias.scope
1060 // metadata (and, thus, won't add any).
1061 if (const Argument *A = dyn_cast<Argument>(V)) {
1062 if (!A->hasNoAliasAttr())
1063 UsesAliasingPtr = true;
1064 } else {
1065 UsesAliasingPtr = true;
1066 }
1067
1068 // If this is not some identified function-local object (which cannot
1069 // directly alias a noalias argument), or some other argument (which,
1070 // by definition, also cannot alias a noalias argument), then we could
1071 // alias a noalias argument that has been captured.
1072 if (!isa<Argument>(V) &&
1073 !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1074 CanDeriveViaCapture = true;
1075 }
1076
1077 // A function call can always get captured noalias pointers (via other
1078 // parameters, globals, etc.).
1079 if (IsFuncCall && !IsArgMemOnlyCall)
1080 CanDeriveViaCapture = true;
1081
1082 // First, we want to figure out all of the sets with which we definitely
1083 // don't alias. Iterate over all noalias sets, and add those for which:
1084 // 1. The noalias argument is not in the set of objects from which we
1085 // definitely derive.
1086 // 2. The noalias argument has not yet been captured.
1087 // An arbitrary function that might load pointers could see captured
1088 // noalias arguments via other noalias arguments or globals, and so we
1089 // must always check for prior capture.
1090 for (const Argument *A : NoAliasArgs) {
1091 if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1092 // It might be tempting to skip the
1093 // PointerMayBeCapturedBefore check if
1094 // A->hasNoCaptureAttr() is true, but this is
1095 // incorrect because nocapture only guarantees
1096 // that no copies outlive the function, not
1097 // that the value cannot be locally captured.
1098 !PointerMayBeCapturedBefore(A,
1099 /* ReturnCaptures */ false,
1100 /* StoreCaptures */ false, I, &DT)))
1101 NoAliases.push_back(NewScopes[A]);
1102 }
1103
1104 if (!NoAliases.empty())
1105 NI->setMetadata(LLVMContext::MD_noalias,
1106 MDNode::concatenate(
1107 NI->getMetadata(LLVMContext::MD_noalias),
1108 MDNode::get(CalledFunc->getContext(), NoAliases)));
1109
1110 // Next, we want to figure out all of the sets to which we might belong.
1111 // We might belong to a set if the noalias argument is in the set of
1112 // underlying objects. If there is some non-noalias argument in our list
1113 // of underlying objects, then we cannot add a scope because the fact
1114 // that some access does not alias with any set of our noalias arguments
1115 // cannot itself guarantee that it does not alias with this access
1116 // (because there is some pointer of unknown origin involved and the
1117 // other access might also depend on this pointer). We also cannot add
1118 // scopes to arbitrary functions unless we know they don't access any
1119 // non-parameter pointer-values.
1120 bool CanAddScopes = !UsesAliasingPtr;
1121 if (CanAddScopes && IsFuncCall)
1122 CanAddScopes = IsArgMemOnlyCall;
1123
1124 if (CanAddScopes)
1125 for (const Argument *A : NoAliasArgs) {
1126 if (ObjSet.count(A))
1127 Scopes.push_back(NewScopes[A]);
1128 }
1129
1130 if (!Scopes.empty())
1131 NI->setMetadata(
1132 LLVMContext::MD_alias_scope,
1133 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1134 MDNode::get(CalledFunc->getContext(), Scopes)));
1135 }
1136 }
1137}
1138
1139/// If the inlined function has non-byval align arguments, then
1140/// add @llvm.assume-based alignment assumptions to preserve this information.
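/// Illustrative sketch (assuming LLVM 10's ptrtoint-based encoding, which
/// the original source does not spell out): a parameter "i32* align 16 %p"
/// yields roughly
///   %ptrint = ptrtoint i32* %p to i64
///   %maskedptr = and i64 %ptrint, 15
///   %maskcond = icmp eq i64 %maskedptr, 0
///   call void @llvm.assume(i1 %maskcond)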
1141static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1142 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1143 return;
1144
1145 AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1146 auto &DL = CS.getCaller()->getParent()->getDataLayout();
1147
1148 // To avoid inserting redundant assumptions, we should check for assumptions
1149 // already in the caller. To do this, we might need a DT of the caller.
1150 DominatorTree DT;
1151 bool DTCalculated = false;
1152
1153 Function *CalledFunc = CS.getCalledFunction();
1154 for (Argument &Arg : CalledFunc->args()) {
1155 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1156 if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1157 if (!DTCalculated) {
1158 DT.recalculate(*CS.getCaller());
1159 DTCalculated = true;
1160 }
1161
1162 // If we can already prove the asserted alignment in the context of the
1163 // caller, then don't bother inserting the assumption.
1164 Value *ArgVal = CS.getArgument(Arg.getArgNo());
1165 if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1166 continue;
1167
1168 CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1169 .CreateAlignmentAssumption(DL, ArgVal, Align);
1170 AC->registerAssumption(NewAsmp);
1171 }
1172 }
1173}
1174
1175/// Once we have cloned code over from a callee into the caller,
1176/// update the specified callgraph to reflect the changes we made.
1177/// Note that it's possible that not all code was copied over, so only
1178/// some edges of the callgraph may remain.
1179static void UpdateCallGraphAfterInlining(CallSite CS,
1180 Function::iterator FirstNewBlock,
1181 ValueToValueMapTy &VMap,
1182 InlineFunctionInfo &IFI) {
1183 CallGraph &CG = *IFI.CG;
1184 const Function *Caller = CS.getCaller();
1185 const Function *Callee = CS.getCalledFunction();
1186 CallGraphNode *CalleeNode = CG[Callee];
1187 CallGraphNode *CallerNode = CG[Caller];
1188
1189 // Since the callee's own call sites (those not inlined further) are cloned
1190 // into the caller, add edges from the caller to all of the callees of the callee.
1191 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1192
1193 // Consider the case where CalleeNode == CallerNode.
1194 CallGraphNode::CalledFunctionsVector CallCache;
1195 if (CalleeNode == CallerNode) {
1196 CallCache.assign(I, E);
1197 I = CallCache.begin();
1198 E = CallCache.end();
1199 }
1200
1201 for (; I != E; ++I) {
1202 const Value *OrigCall = I->first;
1203
1204 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1205 // Only copy the edge if the call was inlined!
1206 if (VMI == VMap.end() || VMI->second == nullptr)
1207 continue;
1208
1209 // If the call was inlined, but then constant folded, there is no edge to
1210 // add. Check for this case.
1211 auto *NewCall = dyn_cast<CallBase>(VMI->second);
1212 if (!NewCall)
1213 continue;
1214
1215 // We do not treat intrinsic calls like real function calls because we
1216 // expect them to become inline code; do not add an edge for an intrinsic.
1217 if (NewCall->getCalledFunction() &&
1218 NewCall->getCalledFunction()->isIntrinsic())
1219 continue;
1220
1221 // Remember that this call site got inlined for the client of
1222 // InlineFunction.
1223 IFI.InlinedCalls.push_back(NewCall);
1224
1225 // It's possible that inlining the callsite will cause it to go from an
1226 // indirect to a direct call by resolving a function pointer. If this
1227 // happens, set the callee of the new call site to a more precise
1228 // destination. This can also happen if the call graph node of the caller
1229 // was just unnecessarily imprecise.
1230 if (!I->second->getFunction())
1231 if (Function *F = NewCall->getCalledFunction()) {
1232 // Indirect call site resolved to direct call.
1233 CallerNode->addCalledFunction(NewCall, CG[F]);
1234
1235 continue;
1236 }
1237
1238 CallerNode->addCalledFunction(NewCall, I->second);
1239 }
1240
1241 // Update the call graph by deleting the edge from Callee to Caller. We must
1242 // do this after the loop above in case Caller and Callee are the same.
1243 CallerNode->removeCallEdgeFor(*cast<CallBase>(CS.getInstruction()));
1244}
1245
1246static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1247 BasicBlock *InsertBlock,
1248 InlineFunctionInfo &IFI) {
1249 Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1250 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1251
1252 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1253
1254 // Always generate a memcpy of alignment 1 here because we don't know
1255 // the alignment of the src pointer. Other optimizations can infer
1256 // better alignment.
1257 Builder.CreateMemCpy(Dst, /*DstAlign*/ Align::None(), Src,
1258 /*SrcAlign*/ Align::None(), Size);
1259}
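
// Illustrative sketch, not part of the original source: for a call site
//   call void @f(%struct.S* byval align 8 %p)
// HandleByValArgument (below) introduces a caller-entry alloca and
// HandleByValArgumentInit emits the now-explicit copy, schematically:
//   %p.copy = alloca %struct.S, align 8
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 false)
// (operands shown schematically; the real call uses bitcasts and a concrete
// store size from the DataLayout).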
1260
1261/// When inlining a call site that has a byval argument,
1262/// we have to make the implicit memcpy explicit by adding it.
1263static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1264 const Function *CalledFunc,
1265 InlineFunctionInfo &IFI,
1266 unsigned ByValAlignment) {
1267 PointerType *ArgTy = cast<PointerType>(Arg->getType());
1268 Type *AggTy = ArgTy->getElementType();
1269
1270 Function *Caller = TheCall->getFunction();
1271 const DataLayout &DL = Caller->getParent()->getDataLayout();
1272
1273 // If the called function is readonly, then it could not mutate the caller's
1274 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1275 // temporary.
1276 if (CalledFunc->onlyReadsMemory()) {
1277 // If the byval argument has a specified alignment that is greater than the
1278 // passed in pointer, then we either have to round up the input pointer or
1279 // give up on this transformation.
1280 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1281 return Arg;
1282
1283 AssumptionCache *AC =
1284 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1285
1286 // If the pointer is already known to be sufficiently aligned, or if we can
1287 // round it up to a larger alignment, then we don't need a temporary.
1288 if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1289 ByValAlignment)
1290 return Arg;
1291
1292 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1293 // for code quality, but rarely happens and is required for correctness.
1294 }
1295
1296 // Create the alloca, using the DataLayout's preferred alignment for the type.
1297 Align Alignment(DL.getPrefTypeAlignment(AggTy));
1298
1299 // If the byval had an alignment specified, we *must* use at least that
1300 // alignment, as it is required by the byval argument (and uses of the
1301 // pointer inside the callee).
1302 Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1303
1304 Value *NewAlloca =
1305 new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1306 Arg->getName(), &*Caller->begin()->begin());
1307 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1308
1309 // Uses of the argument in the function should use our new alloca
1310 // instead.
1311 return NewAlloca;
1312}
1313
1314// Check whether this Value is used by a lifetime intrinsic.
1315static bool isUsedByLifetimeMarker(Value *V) {
1316 for (User *U : V->users())
1317 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1318 if (II->isLifetimeStartOrEnd())
1319 return true;
1320 return false;
1321}
1322
1323// Check whether the given alloca already has
1324// lifetime.start or lifetime.end intrinsics.
1325static bool hasLifetimeMarkers(AllocaInst *AI) {
1326 Type *Ty = AI->getType();
1327 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1328 Ty->getPointerAddressSpace());
1329 if (Ty == Int8PtrTy)
1330 return isUsedByLifetimeMarker(AI);
1331
1332 // Do a scan to find all the casts to i8*.
1333 for (User *U : AI->users()) {
1334 if (U->getType() != Int8PtrTy) continue;
1335 if (U->stripPointerCasts() != AI) continue;
1336 if (isUsedByLifetimeMarker(U))
1337 return true;
1338 }
1339 return false;
1340}
1341
1342/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1343/// block. Allocas used in inalloca calls and allocas of dynamic array size
1344/// cannot be static.
1345static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1346 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1347}
1348
1349/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1350/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1351static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1352 LLVMContext &Ctx,
1353 DenseMap<const MDNode *, MDNode *> &IANodes) {
1354 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1355 return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1356 IA);
1357}
1358
1359/// Returns the LoopID for a loop which has been cloned from another
1360/// function for inlining with the new inlined-at start and end locs.
1361static MDNode *inlineLoopID(const MDNode *OrigLoopId, DILocation *InlinedAt,
1362 LLVMContext &Ctx,
1363 DenseMap<const MDNode *, MDNode *> &IANodes) {
1364 assert(OrigLoopId && OrigLoopId->getNumOperands() > 0 &&
1365 "Loop ID needs at least one operand");
1366 assert(OrigLoopId && OrigLoopId->getOperand(0).get() == OrigLoopId &&
1367 "Loop ID should refer to itself");
1368
1369 // Save space for the self-referential LoopID.
1370 SmallVector<Metadata *, 4> MDs = {nullptr};
1371
1372 for (unsigned i = 1; i < OrigLoopId->getNumOperands(); ++i) {
1373 Metadata *MD = OrigLoopId->getOperand(i);
1374 // Update the DILocations to encode the inlined-at metadata.
1375 if (DILocation *DL = dyn_cast<DILocation>(MD))
1376 MDs.push_back(inlineDebugLoc(DL, InlinedAt, Ctx, IANodes));
1377 else
1378 MDs.push_back(MD);
1379 }
1380
1381 MDNode *NewLoopID = MDNode::getDistinct(Ctx, MDs);
1382 // Insert the self-referential LoopID.
1383 NewLoopID->replaceOperandWith(0, NewLoopID);
1384 return NewLoopID;
1385}
1386
1387/// Update inlined instructions' line numbers to
1388/// encode the location where these instructions are inlined.
1389static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1390 Instruction *TheCall, bool CalleeHasDebugInfo) {
1391 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1392 if (!TheCallDL)
1393 return;
1394
1395 auto &Ctx = Fn->getContext();
1396 DILocation *InlinedAtNode = TheCallDL;
1397
1398 // Create a unique call site, not to be confused with any other call from the
1399 // same location.
1400 InlinedAtNode = DILocation::getDistinct(
1401 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1402 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1403
1404 // Cache the inlined-at nodes as they're built so they are reused; without
1405 // this, every instruction's inlined-at chain would become distinct from
1406 // every other's.
1407 DenseMap<const MDNode *, MDNode *> IANodes;
1408
1409 // Check if we are not generating inline line tables and want to use
1410 // the call site location instead.
1411 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1412
1413 for (; FI != Fn->end(); ++FI) {
1414 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1415 BI != BE; ++BI) {
1416 // Loop metadata needs to be updated so that the start and end locs
1417 // reference inlined-at locations.
1418 if (MDNode *LoopID = BI->getMetadata(LLVMContext::MD_loop)) {
1419 MDNode *NewLoopID =
1420 inlineLoopID(LoopID, InlinedAtNode, BI->getContext(), IANodes);
1421 BI->setMetadata(LLVMContext::MD_loop, NewLoopID);
1422 }
1423
1424 if (!NoInlineLineTables)
1425 if (DebugLoc DL = BI->getDebugLoc()) {
1426 DebugLoc IDL =
1427 inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1428 BI->setDebugLoc(IDL);
1429 continue;
1430 }
1431
1432 if (CalleeHasDebugInfo && !NoInlineLineTables)
1433 continue;
1434
1435 // If the inlined instruction has no line number, or if inline info
1436 // is not being generated, make it look as if it originates from the call
1437 // location. This is important for ((__always_inline, __nodebug__))
1438 // functions which must use caller location for all instructions in their
1439 // function body.
1440
1441 // Don't update static allocas, as they may get moved later.
1442 if (auto *AI = dyn_cast<AllocaInst>(BI))
1443 if (allocaWouldBeStaticInEntry(AI))
1444 continue;
1445
1446 BI->setDebugLoc(TheCallDL);
1447 }
1448
1449 // Remove debug info intrinsics if we're not keeping inline info.
1450 if (NoInlineLineTables) {
1451 BasicBlock::iterator BI = FI->begin();
1452 while (BI != FI->end()) {
1453 if (isa<DbgInfoIntrinsic>(BI)) {
1454 BI = BI->eraseFromParent();
1455 continue;
1456 }
1457 ++BI;
1458 }
1459 }
1460
1461 }
1462}
1463
1464/// Update the block frequencies of the caller after a callee has been inlined.
1465///
1466/// Each block cloned into the caller has its block frequency scaled by the
1467/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1468/// callee's entry block gets the same frequency as the callsite block and the
1469/// relative frequencies of all cloned blocks remain the same after cloning.
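/// Worked example with made-up numbers, editorial: if the callee entry
/// frequency is 8, some callee block's frequency is 16, and the callsite
/// block's frequency is 12, the cloned copy of that block is set to 24,
/// preserving its 2:1 ratio relative to the entry clone.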
1470static void updateCallerBFI(BasicBlock *CallSiteBlock,
1471 const ValueToValueMapTy &VMap,
1472 BlockFrequencyInfo *CallerBFI,
1473 BlockFrequencyInfo *CalleeBFI,
1474 const BasicBlock &CalleeEntryBlock) {
1475 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1476 for (auto Entry : VMap) {
1477 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1478 continue;
1479 auto *OrigBB = cast<BasicBlock>(Entry.first);
1480 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1481 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1482 if (!ClonedBBs.insert(ClonedBB).second) {
1483 // Multiple blocks in the callee might get mapped to one cloned block in
1484 // the caller since we prune the callee as we clone it. When that happens,
1485 // we want to use the maximum among the original blocks' frequencies.
1486 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1487 if (NewFreq > Freq)
1488 Freq = NewFreq;
1489 }
1490 CallerBFI->setBlockFreq(ClonedBB, Freq);
1491 }
1492 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1493 CallerBFI->setBlockFreqAndScale(
1494 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1495 ClonedBBs);
1496}
1497
1498/// Update the branch metadata for cloned call instructions.
1499static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1500 const ProfileCount &CalleeEntryCount,
1501 const Instruction *TheCall,
1502 ProfileSummaryInfo *PSI,
1503 BlockFrequencyInfo *CallerBFI) {
1504 if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1505 CalleeEntryCount.getCount() < 1)
1506 return;
1507 auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1508 int64_t CallCount =
1509 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1510 CalleeEntryCount.getCount());
1511 updateProfileCallee(Callee, -CallCount, &VMap);
1512}
1513
1514void llvm::updateProfileCallee(
1515 Function *Callee, int64_t entryDelta,
1516 const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1517 auto CalleeCount = Callee->getEntryCount();
1518 if (!CalleeCount.hasValue())
1519 return;
1520
1521 uint64_t priorEntryCount = CalleeCount.getCount();
1522 uint64_t newEntryCount;
1523
1524 // Since CallSiteCount is an estimate, it could exceed the original callee
1525 // count; clamp to 0 in that case to guard against underflow.
1526 if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1527 newEntryCount = 0;
1528 else
1529 newEntryCount = priorEntryCount + entryDelta;
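// Editorial example: priorEntryCount = 100 with entryDelta = -120 (the
// call-site estimate exceeded the callee count) clamps newEntryCount to 0
// rather than letting the subtraction wrap.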
1530
1531 // Are we in the middle of inlining?
1532 if (VMap) {
1533 uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1534 for (auto Entry : *VMap)
1535 if (isa<CallInst>(Entry.first))
1536 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1537 CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1538 }
1539
1540 if (entryDelta) {
1541 Callee->setEntryCount(newEntryCount);
1542
1543 for (BasicBlock &BB : *Callee)
1544 // No need to update the callsite if it is pruned during inlining.
1545 if (!VMap || VMap->count(&BB))
1546 for (Instruction &I : BB)
1547 if (CallInst *CI = dyn_cast<CallInst>(&I))
1548 CI->updateProfWeight(newEntryCount, priorEntryCount);
1549 }
1550}
1551
1552/// This function inlines the called function into the basic block of the
1553/// caller. This returns false if it is not possible to inline this call.
1554/// The program is still in a well-defined state if this occurs, though.
1555///
1556/// Note that this only does one level of inlining. For example, if the
1557/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1558/// exists in the instruction stream. Similarly this will inline a recursive
1559/// function by one level.
1560llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1561 AAResults *CalleeAAR,
1562 bool InsertLifetime,
1563 Function *ForwardVarArgsTo) {
1564 Instruction *TheCall = CS.getInstruction();
1565 assert(TheCall->getParent() && TheCall->getFunction()
1566 && "Instruction not in function!");
1567
1568 // FIXME: we don't inline callbr yet.
1569 if (isa<CallBrInst>(TheCall))
1570 return false;
1571
1572 // If IFI has any state in it, zap it before we fill it in.
1573 IFI.reset();
1574
1575 Function *CalledFunc = CS.getCalledFunction();
1576 if (!CalledFunc || // Can't inline external function or indirect
1577 CalledFunc->isDeclaration()) // call!
1578 return "external or indirect";
1579
1580 // The inliner does not know how to inline through calls with operand bundles
1581 // in general ...
1582 if (CS.hasOperandBundles()) {
1583 for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1584 uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1585 // ... but it knows how to inline through "deopt" operand bundles ...
1586 if (Tag == LLVMContext::OB_deopt)
1587 continue;
1588 // ... and "funclet" operand bundles.
1589 if (Tag == LLVMContext::OB_funclet)
1590 continue;
1591
1592 return "unsupported operand bundle";
1593 }
1594 }
1595
1596 // If the call to the callee cannot throw, set the 'nounwind' flag on any
1597 // calls that we inline.
1598 bool MarkNoUnwind = CS.doesNotThrow();
1599
1600 BasicBlock *OrigBB = TheCall->getParent();
1601 Function *Caller = OrigBB->getParent();
1602
1603 // GC poses two hazards to inlining, which only occur when the callee has GC:
1604 // 1. If the caller has no GC, then the callee's GC must be propagated to the
1605 // caller.
1606 // 2. If the caller has a differing GC, it is invalid to inline.
1607 if (CalledFunc->hasGC()) {
1608 if (!Caller->hasGC())
1609 Caller->setGC(CalledFunc->getGC());
1610 else if (CalledFunc->getGC() != Caller->getGC())
1611 return "incompatible GC";
1612 }
1613
1614 // Get the personality function from the callee if it contains a landing pad.
1615 Constant *CalledPersonality =
1616 CalledFunc->hasPersonalityFn()
1617 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1618 : nullptr;
1619
1620 // Find the personality function used by the landing pads of the caller. If it
1621 // exists, then check to see that it matches the personality function used in
1622 // the callee.
1623 Constant *CallerPersonality =
1624 Caller->hasPersonalityFn()
1625 ? Caller->getPersonalityFn()->stripPointerCasts()
1626 : nullptr;
1627 if (CalledPersonality) {
1628 if (!CallerPersonality)
1629 Caller->setPersonalityFn(CalledPersonality);
1630 // If the personality functions match, then we can perform the
1631 // inlining. Otherwise, we can't inline.
1632 // TODO: This isn't 100% true. Some personality functions are proper
1633 // supersets of others and can be used in place of the other.
1634 else if (CalledPersonality != CallerPersonality)
1635 return "incompatible personality";
1636 }
1637
1638 // We need to figure out which funclet the callsite was in so that we may
1639 // properly nest the callee.
1640 Instruction *CallSiteEHPad = nullptr;
1641 if (CallerPersonality) {
1642 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1643 if (isScopedEHPersonality(Personality)) {
1644 Optional<OperandBundleUse> ParentFunclet =
1645 CS.getOperandBundle(LLVMContext::OB_funclet);
1646 if (ParentFunclet)
1647 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1648
1649 // OK, the inlining site is legal. What about the target function?
1650
1651 if (CallSiteEHPad) {
1652 if (Personality == EHPersonality::MSVC_CXX) {
1653 // The MSVC personality cannot tolerate catches getting inlined into
1654 // cleanup funclets.
1655 if (isa<CleanupPadInst>(CallSiteEHPad)) {
1656 // Ok, the call site is within a cleanuppad. Let's check the callee
1657 // for catchpads.
1658 for (const BasicBlock &CalledBB : *CalledFunc) {
1659 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1660 return "catch in cleanup funclet";
1661 }
1662 }
1663 } else if (isAsynchronousEHPersonality(Personality)) {
1664 // SEH is even less tolerant; there may not be any sort of exceptional
1665 // funclet in the callee.
1666 for (const BasicBlock &CalledBB : *CalledFunc) {
1667 if (CalledBB.isEHPad())
1668 return "SEH in cleanup funclet";
1669 }
1670 }
1671 }
1672 }
1673 }
1674
1675 // Determine if we are dealing with a call in an EHPad which does not unwind
1676 // to caller.
1677 bool EHPadForCallUnwindsLocally = false;
1678 if (CallSiteEHPad && CS.isCall()) {
1679 UnwindDestMemoTy FuncletUnwindMap;
1680 Value *CallSiteUnwindDestToken =
1681 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1682
1683 EHPadForCallUnwindsLocally =
1684 CallSiteUnwindDestToken &&
1685 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1686 }
1687
1688 // Get an iterator to the last basic block in the function, which will have
1689 // the new function inlined after it.
1690 Function::iterator LastBlock = --Caller->end();
1691
1692 // Make sure to capture all of the return instructions from the cloned
1693 // function.
1694 SmallVector<ReturnInst*, 8> Returns;
1695 ClonedCodeInfo InlinedFunctionInfo;
1696 Function::iterator FirstNewBlock;
1697
1698 { // Scope to destroy VMap after cloning.
1699 ValueToValueMapTy VMap;
1700 // Keep a list of pair (dst, src) to emit byval initializations.
1701 SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1702
1703 auto &DL = Caller->getParent()->getDataLayout();
1704
1705 // Calculate the vector of arguments to pass into the function cloner, which
1706 // matches up the formal to the actual argument values.
1707 CallSite::arg_iterator AI = CS.arg_begin();
1708 unsigned ArgNo = 0;
1709 for (Function::arg_iterator I = CalledFunc->arg_begin(),
1710 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1711 Value *ActualArg = *AI;
1712
1713 // When byval arguments are actually inlined, we need to make the copy implied
1714 // by them explicit. However, we don't do this if the callee is readonly
1715 // or readnone, because the copy would be unneeded: the callee doesn't
1716 // modify the struct.
1717 if (CS.isByValArgument(ArgNo)) {
1718 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1719 CalledFunc->getParamAlignment(ArgNo));
1720 if (ActualArg != *AI)
1721 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1722 }
1723
1724 VMap[&*I] = ActualArg;
1725 }
1726
1727 // Add alignment assumptions if necessary. We do this before the inlined
1728 // instructions are actually cloned into the caller so that we can easily
1729 // check what will be known at the start of the inlined code.
1730 AddAlignmentAssumptions(CS, IFI);
1731
1732 // We want the inliner to prune the code as it copies. We would LOVE to
1733 // have no dead or constant instructions leftover after inlining occurs
1734 // (which can happen, e.g., because an argument was constant), but we'll be
1735 // happy with whatever the cloner can do.
1736 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1737 /*ModuleLevelChanges=*/false, Returns, ".i",
1738 &InlinedFunctionInfo, TheCall);
1739 // Remember the first block that is newly cloned over.
1740 FirstNewBlock = LastBlock; ++FirstNewBlock;
1741
1742 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1743 // Update the BFI of blocks cloned into the caller.
1744 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1745 CalledFunc->front());
1746
1747 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1748 IFI.PSI, IFI.CallerBFI);
1749
1750 // Inject byval arguments initialization.
1751 for (std::pair<Value*, Value*> &Init : ByValInit)
1752 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1753 &*FirstNewBlock, IFI);
1754
1755 Optional<OperandBundleUse> ParentDeopt =
1756 CS.getOperandBundle(LLVMContext::OB_deopt);
1757 if (ParentDeopt) {
1758 SmallVector<OperandBundleDef, 2> OpDefs;
1759
1760 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1761 Instruction *I = dyn_cast_or_null<Instruction>(VH);
1762 if (!I) continue; // instruction was DCE'd or RAUW'ed to undef
1763
1764 OpDefs.clear();
1765
1766 CallSite ICS(I);
1767 OpDefs.reserve(ICS.getNumOperandBundles());
1768
1769 for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1770 auto ChildOB = ICS.getOperandBundleAt(i);
1771 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1772 // If the inlined call has other operand bundles, let them be
1773 OpDefs.emplace_back(ChildOB);
1774 continue;
1775 }
1776
1777 // It may be useful to separate this logic (of handling operand
1778 // bundles) out to a separate "policy" component if this gets crowded.
1779 // Prepend the parent's deoptimization continuation to the newly
1780 // inlined call's deoptimization continuation.
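// Editorial example: a parent "deopt"(i32 1, i32 2) merged with a child
// "deopt"(i32 3) yields "deopt"(i32 1, i32 2, i32 3) on the new call.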
1781 std::vector<Value *> MergedDeoptArgs;
1782 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1783 ChildOB.Inputs.size());
1784
1785 MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1786 ParentDeopt->Inputs.begin(),
1787 ParentDeopt->Inputs.end());
1788 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1789 ChildOB.Inputs.end());
1790
1791 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1792 }
1793
1794 Instruction *NewI = nullptr;
1795 if (isa<CallInst>(I))
1796 NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1797 else if (isa<CallBrInst>(I))
1798 NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
1799 else
1800 NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1801
1802 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1803 // this even if the call returns void.
1804 I->replaceAllUsesWith(NewI);
1805
1806 VH = nullptr;
1807 I->eraseFromParent();
1808 }
1809 }
1810
1811 // Update the callgraph if requested.
1812 if (IFI.CG)
1813 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1814
1815 // For 'nodebug' functions, the associated DISubprogram is always null.
1816 // Conservatively avoid propagating the callsite debug location to
1817 // instructions inlined from a function whose DISubprogram is not null.
1818 fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1819 CalledFunc->getSubprogram() != nullptr);
1820
1821 // Clone existing noalias metadata if necessary.
1822 CloneAliasScopeMetadata(CS, VMap);
1823
1824 // Add noalias metadata if necessary.
1825 AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1826
1827 // Propagate llvm.mem.parallel_loop_access if necessary.
1828 PropagateParallelLoopAccessMetadata(CS, VMap);
1829
1830 // Register any cloned assumptions.
1831 if (IFI.GetAssumptionCache)
1832 for (BasicBlock &NewBlock :
1833 make_range(FirstNewBlock->getIterator(), Caller->end()))
1834 for (Instruction &I : NewBlock) {
1835 if (auto *II = dyn_cast<IntrinsicInst>(&I))
1836 if (II->getIntrinsicID() == Intrinsic::assume)
1837 (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1838 }
1839 }
1840
1841 // If there are any alloca instructions in the block that used to be the entry
1842 // block for the callee, move them to the entry block of the caller. First
1843 // calculate which instruction they should be inserted before. We insert the
1844 // instructions at the end of the current alloca list.
1845 {
1846 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1847 for (BasicBlock::iterator I = FirstNewBlock->begin(),
1848 E = FirstNewBlock->end(); I != E; ) {
1849 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1850 if (!AI) continue;
1851
1852 // If the alloca is now dead, remove it. This often occurs due to code
1853 // specialization.
1854 if (AI->use_empty()) {
1855 AI->eraseFromParent();
1856 continue;
1857 }
1858
1859 if (!allocaWouldBeStaticInEntry(AI))
1860 continue;
1861
1862 // Keep track of the static allocas that we inline into the caller.
1863 IFI.StaticAllocas.push_back(AI);
1864
1865 // Scan for the block of allocas that we can move over, and move them
1866 // all at once.
1867 while (isa<AllocaInst>(I) &&
1868 !cast<AllocaInst>(I)->use_empty() &&
1869 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1870 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1871 ++I;
1872 }
1873
1874 // Transfer all of the allocas over in a block. Using splice means
1875 // that the instructions aren't removed from the symbol table, then
1876 // reinserted.
1877 Caller->getEntryBlock().getInstList().splice(
1878 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1879 }
1880 // Move any dbg.declares describing the allocas into the entry basic block.
1881 DIBuilder DIB(*Caller->getParent());
1882 for (auto &AI : IFI.StaticAllocas)
1883 replaceDbgDeclareForAlloca(AI, AI, DIB, DIExpression::ApplyOffset, 0);
1884 }
1885
1886 SmallVector<Value*,4> VarArgsToForward;
1887 SmallVector<AttributeSet, 4> VarArgsAttrs;
1888 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1889 i < CS.getNumArgOperands(); i++) {
1890 VarArgsToForward.push_back(CS.getArgOperand(i));
1891 VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i));
1892 }
1893
1894 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1895 if (InlinedFunctionInfo.ContainsCalls) {
1896 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1897 if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1898 CallSiteTailKind = CI->getTailCallKind();
1899
1900 // For inlining purposes, the "notail" marker is the same as no marker.
1901 if (CallSiteTailKind == CallInst::TCK_NoTail)
1902 CallSiteTailKind = CallInst::TCK_None;
1903
1904 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1905 ++BB) {
1906 for (auto II = BB->begin(); II != BB->end();) {
1907 Instruction &I = *II++;
1908 CallInst *CI = dyn_cast<CallInst>(&I);
1909 if (!CI)
1910 continue;
1911
1912 // Forward varargs from inlined call site to calls to the
1913 // ForwardVarArgsTo function, if requested, and to musttail calls.
1914 if (!VarArgsToForward.empty() &&
1915 ((ForwardVarArgsTo &&
1916 CI->getCalledFunction() == ForwardVarArgsTo) ||
1917 CI->isMustTailCall())) {
1918 // Collect attributes for non-vararg parameters.
1919 AttributeList Attrs = CI->getAttributes();
1920 SmallVector<AttributeSet, 8> ArgAttrs;
1921 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1922 for (unsigned ArgNo = 0;
1923 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1924 ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1925 }
1926
1927 // Add VarArg attributes.
1928 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
1929 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
1930 Attrs.getRetAttributes(), ArgAttrs);
1931 // Add VarArgs to existing parameters.
1932 SmallVector<Value *, 6> Params(CI->arg_operands());
1933 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
1934 CallInst *NewCI = CallInst::Create(
1935 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
1936 NewCI->setDebugLoc(CI->getDebugLoc());
1937 NewCI->setAttributes(Attrs);
1938 NewCI->setCallingConv(CI->getCallingConv());
1939 CI->replaceAllUsesWith(NewCI);
1940 CI->eraseFromParent();
1941 CI = NewCI;
1942 }
1943
1944 if (Function *F = CI->getCalledFunction())
1945 InlinedDeoptimizeCalls |=
1946 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1947
1948 // We need to reduce the strength of any inlined tail calls. For
1949 // musttail, we have to avoid introducing potential unbounded stack
1950 // growth. For example, if functions 'f' and 'g' are mutually recursive
1951 // with musttail, we can inline 'g' into 'f' so long as we preserve
1952 // musttail on the cloned call to 'f'. If either the inlined call site
1953 // or the cloned call site is *not* musttail, the program already has
1954 // one frame of stack growth, so it's safe to remove musttail. Here is
1955 // a table of example transformations:
1956 //
1957 // f -> musttail g -> musttail f ==> f -> musttail f
1958 // f -> musttail g -> tail f ==> f -> tail f
1959 // f -> g -> musttail f ==> f -> f
1960 // f -> g -> tail f ==> f -> f
1961 //
1962 // Inlined notail calls should remain notail calls.
1963 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1964 if (ChildTCK != CallInst::TCK_NoTail)
1965 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1966 CI->setTailCallKind(ChildTCK);
1967 InlinedMustTailCalls |= CI->isMustTailCall();
1968
1969 // Calls inlined through a 'nounwind' call site should be marked
1970 // 'nounwind'.
1971 if (MarkNoUnwind)
1972 CI->setDoesNotThrow();
1973 }
1974 }
1975 }
1976
1977 // Leave lifetime markers for the static allocas, scoping them to the
1978 // function we just inlined.
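// Illustrative sketch, not part of the original source: each surviving
// static alloca of known size N is bracketed roughly as
//   call void @llvm.lifetime.start.p0i8(i64 N, i8* %ai.cast)
//   ... inlined body ...
//   call void @llvm.lifetime.end.p0i8(i64 N, i8* %ai.cast)
// with an end marker before each return that is not a musttail or
// deoptimize tail.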
1979 if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1980 IRBuilder<> builder(&FirstNewBlock->front());
1981 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1982 AllocaInst *AI = IFI.StaticAllocas[ai];
1983 // Don't mark swifterror allocas. They can't have bitcast uses.
1984 if (AI->isSwiftError())
1985 continue;
1986
1987 // If the alloca is already scoped to something smaller than the whole
1988 // function then there's no need to add redundant, less accurate markers.
1989 if (hasLifetimeMarkers(AI))
1990 continue;
1991
1992 // Try to determine the size of the allocation.
1993 ConstantInt *AllocaSize = nullptr;
1994 if (ConstantInt *AIArraySize =
1995 dyn_cast<ConstantInt>(AI->getArraySize())) {
1996 auto &DL = Caller->getParent()->getDataLayout();
1997 Type *AllocaType = AI->getAllocatedType();
1998 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1999 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2000
2001 // Don't add markers for zero-sized allocas.
2002 if (AllocaArraySize == 0)
2003 continue;
2004
2005 // Check that array size doesn't saturate uint64_t and doesn't
2006 // overflow when it's multiplied by type size.
2007 if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2008 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2009 AllocaTypeSize) {
2010 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2011 AllocaArraySize * AllocaTypeSize);
2012 }
2013 }
2014
2015 builder.CreateLifetimeStart(AI, AllocaSize);
2016 for (ReturnInst *RI : Returns) {
2017 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2018 // call and a return. The return kills all local allocas.
2019 if (InlinedMustTailCalls &&
2020 RI->getParent()->getTerminatingMustTailCall())
2021 continue;
2022 if (InlinedDeoptimizeCalls &&
2023 RI->getParent()->getTerminatingDeoptimizeCall())
2024 continue;
2025 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2026 }
2027 }
2028 }
2029
2030 // If the inlined code contained dynamic alloca instructions, wrap the inlined
2031 // code with llvm.stacksave/llvm.stackrestore intrinsics.
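// Illustrative sketch, not part of the original source: the inlined region
// becomes roughly
//   %savedstack = call i8* @llvm.stacksave()
//   ... inlined code containing the dynamic allocas ...
//   call void @llvm.stackrestore(i8* %savedstack)
// so those allocas are released when the inlined body finishes.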
2032 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2033 Module *M = Caller->getParent();
2034 // Get the two intrinsics we care about.
2035 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2036 Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
2037
2038 // Insert the llvm.stacksave.
2039 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2040 .CreateCall(StackSave, {}, "savedstack");
2041
2042 // Insert a call to llvm.stackrestore before any return instructions in the
2043 // inlined function.
2044 for (ReturnInst *RI : Returns) {
2045 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2046 // call and a return. The return will restore the stack pointer.
2047 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2048 continue;
2049 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2050 continue;
2051 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2052 }
2053 }
2054
2055 // If we are inlining for an invoke instruction, we must make sure to rewrite
2056 // any call instructions into invoke instructions. This is sensitive to which
2057 // funclet pads were top-level in the inlinee, so must be done before
2058 // rewriting the "parent pad" links.
2059 if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
2060 BasicBlock *UnwindDest = II->getUnwindDest();
2061 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2062 if (isa<LandingPadInst>(FirstNonPHI)) {
2063 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2064 } else {
2065 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2066 }
2067 }
2068
2069 // Update the lexical scopes of the new funclets and callsites.
2070 // Anything that had 'none' as its parent is now nested inside the callsite's
2071 // EHPad.
2072
2073 if (CallSiteEHPad) {
2074 for (Function::iterator BB = FirstNewBlock->getIterator(),
2075 E = Caller->end();
2076 BB != E; ++BB) {
2077 // Add bundle operands to any top-level call sites.
2078 SmallVector<OperandBundleDef, 1> OpBundles;
2079 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2080 Instruction *I = &*BBI++;
2081 CallSite CS(I);
2082 if (!CS)
2083 continue;
2084
2085 // Skip call sites which are nounwind intrinsics.
2086 auto *CalledFn =
2087 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2088 if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
2089 continue;
2090
2091 // Skip call sites which already have a "funclet" bundle.
2092 if (CS.getOperandBundle(LLVMContext::OB_funclet))
2093 continue;
2094
2095 CS.getOperandBundlesAsDefs(OpBundles);
2096 OpBundles.emplace_back("funclet", CallSiteEHPad);
2097
2098 Instruction *NewInst;
2099 if (CS.isCall())
2100 NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
2101 else if (CS.isCallBr())
2102 NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
2103 else
2104 NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2105 NewInst->takeName(I);
2106 I->replaceAllUsesWith(NewInst);
2107 I->eraseFromParent();
2108
2109 OpBundles.clear();
2110 }
2111
2112 // It is problematic if the inlinee has a cleanupret which unwinds to the
2113 // caller and we inline it into a call site which doesn't itself unwind to
2114 // the caller but sits in an EH pad that does. Such an edge must be
2115 // dynamically unreachable, so we replace the cleanupret with unreachable.
2116 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2117 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2118 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2119
2120 Instruction *I = BB->getFirstNonPHI();
2121 if (!I->isEHPad())
2122 continue;
2123
2124 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2125 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2126 CatchSwitch->setParentPad(CallSiteEHPad);
2127 } else {
2128 auto *FPI = cast<FuncletPadInst>(I);
2129 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2130 FPI->setParentPad(CallSiteEHPad);
2131 }
2132 }
2133 }
2134
2135 if (InlinedDeoptimizeCalls) {
2136 // We need to at least remove the deoptimizing returns from the Return set,
2137 // so that the control flow from those returns does not get merged into the
2138 // caller (but terminate it instead). If the caller's return type does not
2139 // match the callee's return type, we also need to change the return type of
2140 // the intrinsic.
2141 if (Caller->getReturnType() == TheCall->getType()) {
2142 auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2143 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2144 });
2145 Returns.erase(NewEnd, Returns.end());
2146 } else {
2147 SmallVector<ReturnInst *, 8> NormalReturns;
2148 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2149 Caller->getParent(), Intrinsic::experimental_deoptimize,
2150 {Caller->getReturnType()});
2151
2152 for (ReturnInst *RI : Returns) {
2153 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2154 if (!DeoptCall) {
2155 NormalReturns.push_back(RI);
2156 continue;
2157 }
2158
2159 // The calling convention on the deoptimize call itself may be bogus,
2160 // since the code we're inlining may have undefined behavior (and may
2161 // never actually execute at runtime); but all
2162 // @llvm.experimental.deoptimize declarations have to have the same
2163 // calling convention in a well-formed module.
2164 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2165 NewDeoptIntrinsic->setCallingConv(CallingConv);
2166 auto *CurBB = RI->getParent();
2167 RI->eraseFromParent();
2168
2169 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2170 DeoptCall->arg_end());
2171
2172 SmallVector<OperandBundleDef, 1> OpBundles;
2173 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2174 DeoptCall->eraseFromParent();
2175 assert(!OpBundles.empty() &&
2176 "Expected at least the deopt operand bundle");
2177
2178 IRBuilder<> Builder(CurBB);
2179 CallInst *NewDeoptCall =
2180 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2181 NewDeoptCall->setCallingConv(CallingConv);
2182 if (NewDeoptCall->getType()->isVoidTy())
2183 Builder.CreateRetVoid();
2184 else
2185 Builder.CreateRet(NewDeoptCall);
2186 }
2187
2188 // Leave behind the normal returns so we can merge control flow.
2189 std::swap(Returns, NormalReturns);
2190 }
2191 }
2192
2193 // Handle any inlined musttail call sites. In order for a new call site to be
2194 // musttail, the source of the clone and the inlined call site must have been
2195 // musttail. Therefore it's safe to return without merging control into the
2196 // phi below.
2197 if (InlinedMustTailCalls) {
2198 // Check if we need to bitcast the result of any musttail calls.
2199 Type *NewRetTy = Caller->getReturnType();
2200 bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2201
2202 // Handle the returns preceded by musttail calls separately.
2203 SmallVector<ReturnInst *, 8> NormalReturns;
2204 for (ReturnInst *RI : Returns) {
2205 CallInst *ReturnedMustTail =
2206 RI->getParent()->getTerminatingMustTailCall();
2207 if (!ReturnedMustTail) {
2208 NormalReturns.push_back(RI);
2209 continue;
2210 }
2211 if (!NeedBitCast)
2212 continue;
2213
2214 // Delete the old return and any preceding bitcast.
2215 BasicBlock *CurBB = RI->getParent();
2216 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2217 RI->eraseFromParent();
2218 if (OldCast)
2219 OldCast->eraseFromParent();
2220
2221 // Insert a new bitcast and return with the right type.
2222 IRBuilder<> Builder(CurBB);
2223 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2224 }
2225
2226 // Leave behind the normal returns so we can merge control flow.
2227 std::swap(Returns, NormalReturns);
2228 }
2229
2230 // Now that all of the transforms on the inlined code have taken place but
2231 // before we splice the inlined code into the CFG and lose track of which
2232 // blocks were actually inlined, collect the call sites. We only do this if
2233 // call graph updates weren't requested, as those provide value handle based
2234 // tracking of inlined call sites instead.
2235 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2236 // Otherwise just collect the raw call sites that were inlined.
2237 for (BasicBlock &NewBB :
2238 make_range(FirstNewBlock->getIterator(), Caller->end()))
2239 for (Instruction &I : NewBB)
2240 if (auto CS = CallSite(&I))
2241 IFI.InlinedCallSites.push_back(CS);
2242 }
2243
2244 // If we cloned in _exactly one_ basic block, and if that block ends in a
2245 // return instruction, we splice the body of the inlined callee directly into
2246 // the calling basic block.
2247 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2248 // Move all of the instructions right before the call.
2249 OrigBB->getInstList().splice(TheCall->getIterator(),
2250 FirstNewBlock->getInstList(),
2251 FirstNewBlock->begin(), FirstNewBlock->end());
2252 // Remove the cloned basic block.
2253 Caller->getBasicBlockList().pop_back();
2254
2255 // If the call site was an invoke instruction, add a branch to the normal
2256 // destination.
2257 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2258 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2259 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2260 }
2261
2262 // If the return instruction returned a value, replace uses of the call with
2263 // uses of the returned value.
2264 if (!TheCall->use_empty()) {
2265 ReturnInst *R = Returns[0];
2266 if (TheCall == R->getReturnValue())
2267 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2268 else
2269 TheCall->replaceAllUsesWith(R->getReturnValue());
2270 }
2271 // Since we are now done with the Call/Invoke, we can delete it.
2272 TheCall->eraseFromParent();
2273
2274 // Since we are now done with the return instruction, delete it also.
2275 Returns[0]->eraseFromParent();
2276
2277 // We are now done with the inlining.
2278 return true;
2279 }
2280
2281 // Otherwise, we have the normal case of more than one block to inline or
2282 // multiple return sites.
2283
2284 // We want to clone the entire callee function into the hole between the
2285 // "starter" and "ender" blocks. How we accomplish this depends on whether
2286 // this is an invoke instruction or a call instruction.
2287 BasicBlock *AfterCallBB;
2288 BranchInst *CreatedBranchToNormalDest = nullptr;
2289 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2290
2291 // Add an unconditional branch to make this look like the CallInst case...
2292 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2293
2294 // Split the basic block. This guarantees that no PHI nodes will have to be
2295 // updated due to new incoming edges, and makes the invoke case more
2296 // symmetric to the call case.
2297 AfterCallBB =
2298 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2299 CalledFunc->getName() + ".exit");
2300
2301 } else { // It's a call
2302 // If this is a call instruction, we need to split the basic block that
2303 // the call lives in.
2304 //
2305 AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2306 CalledFunc->getName() + ".exit");
2307 }
2308
2309 if (IFI.CallerBFI) {
2310 // Copy original BB's block frequency to AfterCallBB
2311 IFI.CallerBFI->setBlockFreq(
2312 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2313 }
2314
2315 // Change the branch that used to go to AfterCallBB to branch to the first
2316 // basic block of the inlined function.
2317 //
2318 Instruction *Br = OrigBB->getTerminator();
2319 assert(Br && Br->getOpcode() == Instruction::Br &&
2320 "splitBasicBlock broken!");
2321 Br->setOperand(0, &*FirstNewBlock);
2322
2323 // Now that the function is correct, make it a little bit nicer. In
2324 // particular, move the basic blocks inserted from the end of the function
2325 // into the space made by splitting the source basic block.
2326 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2327 Caller->getBasicBlockList(), FirstNewBlock,
2328 Caller->end());
2329
2330 // Handle all of the return instructions that we just cloned in, and eliminate
2331 // any users of the original call/invoke instruction.
2332 Type *RTy = CalledFunc->getReturnType();
2333
2334 PHINode *PHI = nullptr;
2335 if (Returns.size() > 1) {
2336 // The PHI node should go at the front of the new basic block to merge all
2337 // possible incoming values.
2338 if (!TheCall->use_empty()) {
2339 PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2340 &AfterCallBB->front());
2341 // Anything that used the result of the function call should now use the
2342 // PHI node as their operand.
2343 TheCall->replaceAllUsesWith(PHI);
2344 }
2345
2346 // Loop over all of the return instructions adding entries to the PHI node
2347 // as appropriate.
2348 if (PHI) {
2349 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2350 ReturnInst *RI = Returns[i];
2351 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2352        "Ret value not consistent in function!");
2353 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2354 }
2355 }
2356
2357 // Add a branch to the merge points and remove return instructions.
2358 DebugLoc Loc;
2359 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2360 ReturnInst *RI = Returns[i];
2361 BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2362 Loc = RI->getDebugLoc();
2363 BI->setDebugLoc(Loc);
2364 RI->eraseFromParent();
2365 }
2366 // We need to set the debug location to *somewhere* inside the
2367 // inlined function. The line number may be nonsensical, but the
2368 // instruction will at least be associated with the right
2369 // function.
2370 if (CreatedBranchToNormalDest)
2371 CreatedBranchToNormalDest->setDebugLoc(Loc);
2372 } else if (!Returns.empty()) {
2373 // Otherwise, if there is exactly one return value, just replace anything
2374 // using the return value of the call with the computed value.
2375 if (!TheCall->use_empty()) {
2376 if (TheCall == Returns[0]->getReturnValue())
2377 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2378 else
2379 TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2380 }
2381
2382 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2383 BasicBlock *ReturnBB = Returns[0]->getParent();
2384 ReturnBB->replaceAllUsesWith(AfterCallBB);
2385
2386 // Splice the code from the return block into the block that it will return
2387 // to, which contains the code that was after the call.
2388 AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2389 ReturnBB->getInstList());
2390
2391 if (CreatedBranchToNormalDest)
2392 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2393
2394 // Delete the return instruction and the now-empty ReturnBB.
2395 Returns[0]->eraseFromParent();
2396 ReturnBB->eraseFromParent();
2397 } else if (!TheCall->use_empty()) {
2398 // No returns, but something is using the return value of the call. Just
2399 // nuke the result.
2400 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2401 }
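 // [Editor's illustrative sketch, not in the original source] With two cloned
 // returns "ret i32 %a" and "ret i32 %b", AfterCallBB now begins with
 //     %phi = phi i32 [ %a, %retblock1 ], [ %b, %retblock2 ]
 // every former user of the call result reads %phi, and the returns themselves
 // have been rewritten into unconditional branches to AfterCallBB.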
2402
2403 // Since we are now done with the Call/Invoke, we can delete it.
2404 TheCall->eraseFromParent();
2405
2406 // If we inlined any musttail calls and the original return is now
2407 // unreachable, delete it. It can only contain a bitcast and ret.
2408 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2409 AfterCallBB->eraseFromParent();
2410
2411 // We should always be able to fold the entry block of the function into the
2412 // single predecessor of the block...
2413 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2414 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2415
2416 // Splice the code entry block into calling block, right before the
2417 // unconditional branch.
2418 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2419 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2420
2421 // Remove the unconditional branch.
2422 OrigBB->getInstList().erase(Br);
2423
2424 // Now we can remove the CalleeEntry block, which is now empty.
2425 Caller->getBasicBlockList().erase(CalleeEntry);
2426
2427 // If we inserted a phi node, check to see if it has a single value (e.g. all
2428 // the entries are the same or undef). If so, remove the PHI so it doesn't
2429 // block other optimizations.
2430 if (PHI) {
2431 AssumptionCache *AC =
2432 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2433 auto &DL = Caller->getParent()->getDataLayout();
2434 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2435 PHI->replaceAllUsesWith(V);
2436 PHI->eraseFromParent();
2437 }
2438 }
2439
2440 return true;
2441}
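
[Editor's note: a rough usage sketch, not part of the analyzed file. It assumes
the CallSite-based InlineFunction overload declared in
llvm/Transforms/Utils/Cloning.h around this LLVM 10 snapshot; later releases
switched to a CallBase-based signature, so treat the exact spelling as
version-dependent.]

    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/Cloning.h"

    // Try to inline the callee of CI into its caller. On success the call
    // instruction has been erased, so CI must not be touched afterwards.
    static bool tryInlineCall(llvm::CallInst *CI) {
      llvm::InlineFunctionInfo IFI; // no analysis caches wired up in this sketch
      return static_cast<bool>(llvm::InlineFunction(llvm::CallSite(CI), IFI));
    }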

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/ADT/ilist_iterator.h

1//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef LLVM_ADT_ILIST_ITERATOR_H
10#define LLVM_ADT_ILIST_ITERATOR_H
11
12#include "llvm/ADT/ilist_node.h"
13#include <cassert>
14#include <cstddef>
15#include <iterator>
16#include <type_traits>
17
18namespace llvm {
19
20namespace ilist_detail {
21
22/// Find const-correct node types.
23template <class OptionsT, bool IsConst> struct IteratorTraits;
24template <class OptionsT> struct IteratorTraits<OptionsT, false> {
25 using value_type = typename OptionsT::value_type;
26 using pointer = typename OptionsT::pointer;
27 using reference = typename OptionsT::reference;
28 using node_pointer = ilist_node_impl<OptionsT> *;
29 using node_reference = ilist_node_impl<OptionsT> &;
30};
31template <class OptionsT> struct IteratorTraits<OptionsT, true> {
32 using value_type = const typename OptionsT::value_type;
33 using pointer = typename OptionsT::const_pointer;
34 using reference = typename OptionsT::const_reference;
35 using node_pointer = const ilist_node_impl<OptionsT> *;
36 using node_reference = const ilist_node_impl<OptionsT> &;
37};
38
39template <bool IsReverse> struct IteratorHelper;
40template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
41 using Access = ilist_detail::NodeAccess;
42
43 template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
44 template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
45};
46template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
47 using Access = ilist_detail::NodeAccess;
48
49 template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
50 template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
51};
52
53} // end namespace ilist_detail
54
55/// Iterator for intrusive lists based on ilist_node.
56template <class OptionsT, bool IsReverse, bool IsConst>
57class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
58 friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
59 friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
60 friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
61
62 using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
63 using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
64
65public:
66 using value_type = typename Traits::value_type;
67 using pointer = typename Traits::pointer;
68 using reference = typename Traits::reference;
69 using difference_type = ptrdiff_t;
70 using iterator_category = std::bidirectional_iterator_tag;
71 using const_pointer = typename OptionsT::const_pointer;
72 using const_reference = typename OptionsT::const_reference;
73
74private:
75 using node_pointer = typename Traits::node_pointer;
76 using node_reference = typename Traits::node_reference;
77
78 node_pointer NodePtr = nullptr;
79
80public:
81 /// Create from an ilist_node.
82 explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
83
84 explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
85 explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
86 ilist_iterator() = default;
87
88 // This is templated so that we can allow constructing a const iterator from
89 // a nonconst iterator...
90 template <bool RHSIsConst>
91 ilist_iterator(
92 const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
93 typename std::enable_if<IsConst || !RHSIsConst, void *>::type = nullptr)
94 : NodePtr(RHS.NodePtr) {}
95
96 // This is templated so that we can allow assigning to a const iterator from
97 // a nonconst iterator...
98 template <bool RHSIsConst>
99 typename std::enable_if<IsConst || !RHSIsConst, ilist_iterator &>::type
100 operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
101 NodePtr = RHS.NodePtr;
102 return *this;
103 }
104
105 /// Explicit conversion between forward/reverse iterators.
106 ///
107 /// Translate between forward and reverse iterators without changing range
108 /// boundaries. The resulting iterator will dereference (and have a handle)
109 /// to the previous node, which is somewhat unexpected; but converting the
110 /// two endpoints in a range will give the same range in reverse.
111 ///
112 /// This matches std::reverse_iterator conversions.
113 explicit ilist_iterator(
114 const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
115 : ilist_iterator(++RHS.getReverse()) {}
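 /// [Editor's illustrative example] For a list [A, B, C], a forward iterator
 /// positioned at B converts through this constructor to a reverse iterator
 /// that dereferences A, exactly as std::reverse_iterator does; converting
 /// both endpoints of the forward range [begin, end) therefore yields a
 /// reverse range that visits C, B, A.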
116
117 /// Get a reverse iterator to the same node.
118 ///
119 /// Gives a reverse iterator that will dereference (and have a handle) to the
120 /// same node. Converting the endpoint iterators in a range will give a
121 /// different range; for range operations, use the explicit conversions.
122 ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
123 if (NodePtr)
124 return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
125 return ilist_iterator<OptionsT, !IsReverse, IsConst>();
126 }
127
128 /// Const-cast.
129 ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
130 if (NodePtr)
131 return ilist_iterator<OptionsT, IsReverse, false>(
132 const_cast<typename ilist_iterator<OptionsT, IsReverse,
133 false>::node_reference>(*NodePtr));
134 return ilist_iterator<OptionsT, IsReverse, false>();
135 }
136
137 // Accessors...
138 reference operator*() const {
139   assert(!NodePtr->isKnownSentinel());
140 return *Access::getValuePtr(NodePtr);
141 }
142 pointer operator->() const { return &operator*(); }
143
144 // Comparison operators
145 friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
146 return LHS.NodePtr == RHS.NodePtr;
147 }
148 friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
149 return LHS.NodePtr != RHS.NodePtr;
 [step 3]  Assuming 'LHS.NodePtr' is not equal to 'RHS.NodePtr'
 [step 4]  Returning the value 1, which participates in a condition later
 [step 12] Assuming 'LHS.NodePtr' is not equal to 'RHS.NodePtr'
 [step 13] Returning the value 1, which participates in a condition later
150 }
151
152 // Increment and decrement operators...
153 ilist_iterator &operator--() {
154 NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
155 return *this;
156 }
157 ilist_iterator &operator++() {
158 NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
159 return *this;
160 }
161 ilist_iterator operator--(int) {
162 ilist_iterator tmp = *this;
163 --*this;
164 return tmp;
165 }
166 ilist_iterator operator++(int) {
167 ilist_iterator tmp = *this;
168 ++*this;
169 return tmp;
170 }
171
172 /// Get the underlying ilist_node.
173 node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
174
175 /// Check for end. Only valid if ilist_sentinel_tracking<true>.
176 bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
177};
178
179template <typename From> struct simplify_type;
180
181/// Allow ilist_iterators to convert into pointers to a node automatically when
182/// used by the dyn_cast, cast, isa mechanisms...
183///
184/// FIXME: remove this, since there is no implicit conversion to NodeTy.
185template <class OptionsT, bool IsConst>
186struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
187 using iterator = ilist_iterator<OptionsT, false, IsConst>;
188 using SimpleType = typename iterator::pointer;
189
190 static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
191};
192template <class OptionsT, bool IsConst>
193struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
194 : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
195
196} // end namespace llvm
197
198#endif // LLVM_ADT_ILIST_ITERATOR_H
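
[Editor's note: a minimal usage sketch, not part of the header above. It shows
ilist_iterator in its most common role, walking the intrusive instruction list
of a basic block through the standard IR headers.]

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"

    // Count the instructions in BB; the range-for is backed by ilist_iterator,
    // and each &I is recovered from its embedded ilist_node.
    static unsigned countInstructions(const llvm::BasicBlock &BB) {
      unsigned N = 0;
      for (const llvm::Instruction &I : BB) {
        (void)I; // dereferencing asserts the iterator is not the sentinel
        ++N;
      }
      return N;
    }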

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t s) {
72 return User::operator new(s, 1);
73 }
74
75 /// Transparently provide more efficient getOperand methods.
76 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
77
78 // Methods for support type inquiry through isa, cast, and dyn_cast:
79 static bool classof(const Instruction *I) {
80 return I->isUnaryOp() ||
81 I->getOpcode() == Instruction::Alloca ||
82 I->getOpcode() == Instruction::Load ||
83 I->getOpcode() == Instruction::VAArg ||
84 I->getOpcode() == Instruction::ExtractValue ||
85 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
86 }
87 static bool classof(const Value *V) {
88 return isa<Instruction>(V) && classof(cast<Instruction>(V));
89 }
90};
91
92template <>
93struct OperandTraits<UnaryInstruction> :
94 public FixedNumOperandTraits<UnaryInstruction, 1> {
95};
96
97DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
98
99//===----------------------------------------------------------------------===//
100// UnaryOperator Class
101//===----------------------------------------------------------------------===//
102
103class UnaryOperator : public UnaryInstruction {
104 void AssertOK();
105
106protected:
107 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
108 const Twine &Name, Instruction *InsertBefore);
109 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
110 const Twine &Name, BasicBlock *InsertAtEnd);
111
112 // Note: Instruction needs to be a friend here to call cloneImpl.
113 friend class Instruction;
114
115 UnaryOperator *cloneImpl() const;
116
117public:
118
119 /// Construct a unary instruction, given the opcode and an operand.
120 /// Optionally (if InstBefore is specified) insert the instruction
121 /// into a BasicBlock right before the specified instruction. The specified
122 /// Instruction is allowed to be a dereferenced end iterator.
123 ///
124 static UnaryOperator *Create(UnaryOps Op, Value *S,
125 const Twine &Name = Twine(),
126 Instruction *InsertBefore = nullptr);
127
128 /// Construct a unary instruction, given the opcode and an operand.
129 /// Also automatically insert this instruction to the end of the
130 /// BasicBlock specified.
131 ///
132 static UnaryOperator *Create(UnaryOps Op, Value *S,
133 const Twine &Name,
134 BasicBlock *InsertAtEnd);
135
136 /// These methods just forward to Create, and are useful when you
137 /// statically know what type of instruction you're going to create. These
138 /// helpers just save some typing.
139#define HANDLE_UNARY_INST(N, OPC, CLASS) \
140 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
141 return Create(Instruction::OPC, V, Name);\
142 }
143#include "llvm/IR/Instruction.def"
144#define HANDLE_UNARY_INST(N, OPC, CLASS) \
145 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
146 BasicBlock *BB) {\
147 return Create(Instruction::OPC, V, Name, BB);\
148 }
149#include "llvm/IR/Instruction.def"
150#define HANDLE_UNARY_INST(N, OPC, CLASS) \
151 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
152 Instruction *I) {\
153 return Create(Instruction::OPC, V, Name, I);\
154 }
155#include "llvm/IR/Instruction.def"
156
157 static UnaryOperator *CreateWithCopiedFlags(UnaryOps Opc,
158 Value *V,
159 Instruction *CopyO,
160 const Twine &Name = "") {
161 UnaryOperator *UO = Create(Opc, V, Name);
162 UO->copyIRFlags(CopyO);
163 return UO;
164 }
165
166 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
167 const Twine &Name = "") {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name);
169 }
170
171 UnaryOps getOpcode() const {
172 return static_cast<UnaryOps>(Instruction::getOpcode());
173 }
174
175 // Methods for support type inquiry through isa, cast, and dyn_cast:
176 static bool classof(const Instruction *I) {
177 return I->isUnaryOp();
178 }
179 static bool classof(const Value *V) {
180 return isa<Instruction>(V) && classof(cast<Instruction>(V));
181 }
182};
183
184//===----------------------------------------------------------------------===//
185// BinaryOperator Class
186//===----------------------------------------------------------------------===//
187
188class BinaryOperator : public Instruction {
189 void AssertOK();
190
191protected:
192 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
193 const Twine &Name, Instruction *InsertBefore);
194 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
195 const Twine &Name, BasicBlock *InsertAtEnd);
196
197 // Note: Instruction needs to be a friend here to call cloneImpl.
198 friend class Instruction;
199
200 BinaryOperator *cloneImpl() const;
201
202public:
203 // allocate space for exactly two operands
204 void *operator new(size_t s) {
205 return User::operator new(s, 2);
206 }
207
208 /// Transparently provide more efficient getOperand methods.
209 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InstBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
249 static BinaryOperator *CreateWithCopiedFlags(BinaryOps Opc,
250 Value *V1, Value *V2,
251 Instruction *CopyO,
252 const Twine &Name = "") {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283 static BinaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
284 const Twine &Name = "") {
285 Value *Zero = ConstantFP::getNegativeZero(Op->getType());
286 return CreateWithCopiedFlags(Instruction::FSub, Zero, Op, FMFSource, Name);
287 }
288
289 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
290 const Twine &Name = "") {
291 BinaryOperator *BO = Create(Opc, V1, V2, Name);
292 BO->setHasNoSignedWrap(true);
293 return BO;
294 }
295 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
296 const Twine &Name, BasicBlock *BB) {
297 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
298 BO->setHasNoSignedWrap(true);
299 return BO;
300 }
301 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
302 const Twine &Name, Instruction *I) {
303 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
304 BO->setHasNoSignedWrap(true);
305 return BO;
306 }
307
308 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
309 const Twine &Name = "") {
310 BinaryOperator *BO = Create(Opc, V1, V2, Name);
311 BO->setHasNoUnsignedWrap(true);
312 return BO;
313 }
314 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
315 const Twine &Name, BasicBlock *BB) {
316 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
317 BO->setHasNoUnsignedWrap(true);
318 return BO;
319 }
320 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
321 const Twine &Name, Instruction *I) {
322 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
323 BO->setHasNoUnsignedWrap(true);
324 return BO;
325 }
326
327 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
328 const Twine &Name = "") {
329 BinaryOperator *BO = Create(Opc, V1, V2, Name);
330 BO->setIsExact(true);
331 return BO;
332 }
333 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
334 const Twine &Name, BasicBlock *BB) {
335 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
336 BO->setIsExact(true);
337 return BO;
338 }
339 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
340 const Twine &Name, Instruction *I) {
341 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
342 BO->setIsExact(true);
343 return BO;
344 }
345
346#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
347 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
348 const Twine &Name = "") { \
349 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
350 } \
351 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
352 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
353 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
354 } \
355 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
356 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
357 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
358 }
359
360 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
361 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
362 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
363 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
364 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
365 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
366 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
367 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
368
369 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
370 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
371 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
372 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
373
374#undef DEFINE_HELPERS
375
376 /// Helper functions to construct and inspect unary operations (NEG and NOT)
377 /// via binary operators SUB and XOR:
378 ///
379 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
380 ///
381 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
382 Instruction *InsertBefore = nullptr);
383 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
384 BasicBlock *InsertAtEnd);
385 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
386 Instruction *InsertBefore = nullptr);
387 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
388 BasicBlock *InsertAtEnd);
389 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
390 Instruction *InsertBefore = nullptr);
391 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
392 BasicBlock *InsertAtEnd);
393 static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name = "",
394 Instruction *InsertBefore = nullptr);
395 static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name,
396 BasicBlock *InsertAtEnd);
397 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
398 Instruction *InsertBefore = nullptr);
399 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
400 BasicBlock *InsertAtEnd);
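 /// [Editor's illustrative note] CreateNeg(X) emits the binary instruction
 /// 'sub 0, X' and CreateNot(X) emits 'xor X, -1', so both "unary" helpers
 /// produce ordinary BinaryOperators.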
401
402 BinaryOps getOpcode() const {
403 return static_cast<BinaryOps>(Instruction::getOpcode());
404 }
405
406 /// Exchange the two operands to this instruction.
407 /// This instruction is safe to use on any binary instruction and
408 /// does not modify the semantics of the instruction. If the instruction
409 /// cannot be reversed (i.e., it's a Div), then return true.
410 ///
411 bool swapOperands();
412
413 // Methods for support type inquiry through isa, cast, and dyn_cast:
414 static bool classof(const Instruction *I) {
415 return I->isBinaryOp();
416 }
417 static bool classof(const Value *V) {
418 return isa<Instruction>(V) && classof(cast<Instruction>(V));
419 }
420};
421
422template <>
423struct OperandTraits<BinaryOperator> :
424 public FixedNumOperandTraits<BinaryOperator, 2> {
425};
426
427DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
428
429//===----------------------------------------------------------------------===//
430// CastInst Class
431//===----------------------------------------------------------------------===//
432
433/// This is the base class for all instructions that perform data
434/// casts. It is simply provided so that instruction category testing
435/// can be performed with code like:
436///
437/// if (isa<CastInst>(Instr)) { ... }
438/// Base class of casting instructions.
439class CastInst : public UnaryInstruction {
440protected:
441 /// Constructor with insert-before-instruction semantics for subclasses
442 CastInst(Type *Ty, unsigned iType, Value *S,
443 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
444 : UnaryInstruction(Ty, iType, S, InsertBefore) {
445 setName(NameStr);
446 }
447 /// Constructor with insert-at-end-of-block semantics for subclasses
448 CastInst(Type *Ty, unsigned iType, Value *S,
449 const Twine &NameStr, BasicBlock *InsertAtEnd)
450 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
451 setName(NameStr);
452 }
453
454public:
455 /// Provides a way to construct any of the CastInst subclasses using an
456 /// opcode instead of the subclass's constructor. The opcode must be in the
457 /// CastOps category (Instruction::isCast(opcode) returns true). This
458 /// constructor has insert-before-instruction semantics to automatically
459 /// insert the new CastInst before InsertBefore (if it is non-null).
460 /// Construct any of the CastInst subclasses
461 static CastInst *Create(
462 Instruction::CastOps, ///< The opcode of the cast instruction
463 Value *S, ///< The value to be casted (operand 0)
464 Type *Ty, ///< The type to which cast should be made
465 const Twine &Name = "", ///< Name for the instruction
466 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
467 );
468 /// Provides a way to construct any of the CastInst subclasses using an
469 /// opcode instead of the subclass's constructor. The opcode must be in the
470 /// CastOps category. This constructor has insert-at-end-of-block semantics
471 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
472 /// it's non-null).
473 /// Construct any of the CastInst subclasses
474 static CastInst *Create(
475 Instruction::CastOps, ///< The opcode for the cast instruction
476 Value *S, ///< The value to be casted (operand 0)
477 Type *Ty, ///< The type to which operand is casted
478 const Twine &Name, ///< The name for the instruction
479 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
480 );
481
482 /// Create a ZExt or BitCast cast instruction
483 static CastInst *CreateZExtOrBitCast(
484 Value *S, ///< The value to be casted (operand 0)
485 Type *Ty, ///< The type to which cast should be made
486 const Twine &Name = "", ///< Name for the instruction
487 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
488 );
489
490 /// Create a ZExt or BitCast cast instruction
491 static CastInst *CreateZExtOrBitCast(
492 Value *S, ///< The value to be casted (operand 0)
493 Type *Ty, ///< The type to which operand is casted
494 const Twine &Name, ///< The name for the instruction
495 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
496 );
497
498 /// Create a SExt or BitCast cast instruction
499 static CastInst *CreateSExtOrBitCast(
500 Value *S, ///< The value to be casted (operand 0)
501 Type *Ty, ///< The type to which cast should be made
502 const Twine &Name = "", ///< Name for the instruction
503 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
504 );
505
506 /// Create a SExt or BitCast cast instruction
507 static CastInst *CreateSExtOrBitCast(
508 Value *S, ///< The value to be casted (operand 0)
509 Type *Ty, ///< The type to which operand is casted
510 const Twine &Name, ///< The name for the instruction
511 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
512 );
513
514 /// Create a BitCast, an AddrSpaceCast, or a PtrToInt cast instruction.
515 static CastInst *CreatePointerCast(
516 Value *S, ///< The pointer value to be casted (operand 0)
517 Type *Ty, ///< The type to which operand is casted
518 const Twine &Name, ///< The name for the instruction
519 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
520 );
521
522 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
523 static CastInst *CreatePointerCast(
524 Value *S, ///< The pointer value to be casted (operand 0)
525 Type *Ty, ///< The type to which cast should be made
526 const Twine &Name = "", ///< Name for the instruction
527 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
528 );
529
530 /// Create a BitCast or an AddrSpaceCast cast instruction.
531 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
532 Value *S, ///< The pointer value to be casted (operand 0)
533 Type *Ty, ///< The type to which operand is casted
534 const Twine &Name, ///< The name for the instruction
535 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
536 );
537
538 /// Create a BitCast or an AddrSpaceCast cast instruction.
539 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
540 Value *S, ///< The pointer value to be casted (operand 0)
541 Type *Ty, ///< The type to which cast should be made
542 const Twine &Name = "", ///< Name for the instruction
543 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
544 );
545
546 /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
547 ///
548 /// If the value is a pointer type and the destination an integer type,
549 /// creates a PtrToInt cast. If the value is an integer type and the
550 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
551 /// a bitcast.
552 static CastInst *CreateBitOrPointerCast(
553 Value *S, ///< The value to be casted (operand 0)
554 Type *Ty, ///< The type to which cast should be made
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The type to which cast should be made
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name = "", ///< Name for the instruction
565 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
566 );
567
568 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
569 static CastInst *CreateIntegerCast(
570 Value *S, ///< The integer value to be casted (operand 0)
571 Type *Ty, ///< The integer type to which operand is casted
572 bool isSigned, ///< Whether to regard S as signed or not
573 const Twine &Name, ///< The name for the instruction
574 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
575 );
576
577 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
578 static CastInst *CreateFPCast(
579 Value *S, ///< The floating point value to be casted
580 Type *Ty, ///< The floating point type to cast to
581 const Twine &Name = "", ///< Name for the instruction
582 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
583 );
584
585 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
586 static CastInst *CreateFPCast(
587 Value *S, ///< The floating point value to be casted
588 Type *Ty, ///< The floating point type to cast to
589 const Twine &Name, ///< The name for the instruction
590 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
591 );
592
593 /// Create a Trunc or BitCast cast instruction
594 static CastInst *CreateTruncOrBitCast(
595 Value *S, ///< The value to be casted (operand 0)
596 Type *Ty, ///< The type to which cast should be made
597 const Twine &Name = "", ///< Name for the instruction
598 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
599 );
600
601 /// Create a Trunc or BitCast cast instruction
602 static CastInst *CreateTruncOrBitCast(
603 Value *S, ///< The value to be casted (operand 0)
604 Type *Ty, ///< The type to which operand is casted
605 const Twine &Name, ///< The name for the instruction
606 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
607 );
608
609 /// Check whether it is valid to call getCastOpcode for these types.
610 static bool isCastable(
611 Type *SrcTy, ///< The Type from which the value should be cast.
612 Type *DestTy ///< The Type to which the value should be cast.
613 );
614
615 /// Check whether a bitcast between these types is valid
616 static bool isBitCastable(
617 Type *SrcTy, ///< The Type from which the value should be cast.
618 Type *DestTy ///< The Type to which the value should be cast.
619 );
620
621 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
622 /// types is valid and a no-op.
623 ///
624 /// This ensures that any pointer<->integer cast has enough bits in the
625 /// integer and any other cast is a bitcast.
626 static bool isBitOrNoopPointerCastable(
627 Type *SrcTy, ///< The Type from which the value should be cast.
628 Type *DestTy, ///< The Type to which the value should be cast.
629 const DataLayout &DL);
630
631 /// Returns the opcode necessary to cast Val into Ty using usual casting
632 /// rules.
633 /// Infer the opcode for cast operand and type
634 static Instruction::CastOps getCastOpcode(
635 const Value *Val, ///< The value to cast
636 bool SrcIsSigned, ///< Whether to treat the source as signed
637 Type *Ty, ///< The Type to which the value should be casted
638 bool DstIsSigned ///< Whether to treat the destination as signed
639 );
640
641 /// There are several places where we need to know if a cast instruction
642 /// only deals with integer source and destination types. To simplify that
643 /// logic, this method is provided.
644 /// @returns true iff the cast has only an integral-typed operand and destination type.
645 /// Determine if this is an integer-only cast.
646 bool isIntegerCast() const;
647
648 /// A lossless cast is one that does not alter the basic value. It implies
649 /// a no-op cast but is more stringent, preventing things like int->float,
650 /// long->double, or int->ptr.
651 /// @returns true iff the cast is lossless.
652 /// Determine if this is a lossless cast.
653 bool isLosslessCast() const;
654
655 /// A no-op cast is one that can be effected without changing any bits.
656 /// It implies that the source and destination types are the same size. The
657 /// DataLayout argument is to determine the pointer size when examining casts
658 /// involving Integer and Pointer types. They are no-op casts if the integer
659 /// is the same size as the pointer. However, pointer size varies with
660 /// platform.
661 /// Determine if the described cast is a no-op cast.
662 static bool isNoopCast(
663 Instruction::CastOps Opcode, ///< Opcode of cast
664 Type *SrcTy, ///< SrcTy of cast
665 Type *DstTy, ///< DstTy of cast
666 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
667 );
668
669 /// Determine if this cast is a no-op cast.
670 ///
671 /// \param DL is the DataLayout to determine pointer size.
672 bool isNoopCast(const DataLayout &DL) const;
673
674 /// Determine how a pair of casts can be eliminated, if they can be at all.
675 /// This is a helper function for both CastInst and ConstantExpr.
676 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
677 /// returns Instruction::CastOps value for a cast that can replace
678 /// the pair, casting SrcTy to DstTy.
679 /// Determine if a cast pair is eliminable
680 static unsigned isEliminableCastPair(
681 Instruction::CastOps firstOpcode, ///< Opcode of first cast
682 Instruction::CastOps secondOpcode, ///< Opcode of second cast
683 Type *SrcTy, ///< SrcTy of 1st cast
684 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
685 Type *DstTy, ///< DstTy of 2nd cast
686 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
687 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
688 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
689 );
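 /// [Editor's illustrative example] Two successive bitcasts are eliminable to
 /// a single bitcast, and 'zext i8 -> i16' followed by 'zext i16 -> i32'
 /// folds to one 'zext i8 -> i32'; a trunc followed by a zext is not
 /// eliminable here, since no single cast reproduces the masking effect.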
690
691 /// Return the opcode of this CastInst
692 Instruction::CastOps getOpcode() const {
693 return Instruction::CastOps(Instruction::getOpcode());
694 }
695
696 /// Return the source type, as a convenience
697 Type* getSrcTy() const { return getOperand(0)->getType(); }
698 /// Return the destination type, as a convenience
699 Type* getDestTy() const { return getType(); }
700
701 /// This method can be used to determine if a cast from S to DstTy using
702 /// Opcode op is valid or not.
703 /// @returns true iff the proposed cast is valid.
704 /// Determine if a cast is valid without creating one.
705 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
706
707 /// Methods for support type inquiry through isa, cast, and dyn_cast:
708 static bool classof(const Instruction *I) {
709 return I->isCast();
710 }
711 static bool classof(const Value *V) {
712 return isa<Instruction>(V) && classof(cast<Instruction>(V));
713 }
714};
715
716//===----------------------------------------------------------------------===//
717// CmpInst Class
718//===----------------------------------------------------------------------===//
719
720/// This class is the base class for the comparison instructions.
721/// Abstract base class of comparison instructions.
722class CmpInst : public Instruction {
723public:
724 /// This enumeration lists the possible predicates for CmpInst subclasses.
725 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
726 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
727 /// predicate values are not overlapping between the classes.
728 ///
729 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
730 /// FCMP_* values. Changing the bit patterns requires a potential change to
731 /// those passes.
732 enum Predicate {
733 // Opcode U L G E Intuitive operation
734 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
735 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
736 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
737 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
738 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
739 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
740 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
741 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
742 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
743 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
744 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
745 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
746 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
747 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
748 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
749 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
750 FIRST_FCMP_PREDICATE = FCMP_FALSE,
751 LAST_FCMP_PREDICATE = FCMP_TRUE,
752 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
753 ICMP_EQ = 32, ///< equal
754 ICMP_NE = 33, ///< not equal
755 ICMP_UGT = 34, ///< unsigned greater than
756 ICMP_UGE = 35, ///< unsigned greater or equal
757 ICMP_ULT = 36, ///< unsigned less than
758 ICMP_ULE = 37, ///< unsigned less or equal
759 ICMP_SGT = 38, ///< signed greater than
760 ICMP_SGE = 39, ///< signed greater or equal
761 ICMP_SLT = 40, ///< signed less than
762 ICMP_SLE = 41, ///< signed less or equal
763 FIRST_ICMP_PREDICATE = ICMP_EQ,
764 LAST_ICMP_PREDICATE = ICMP_SLE,
765 BAD_ICMP_PREDICATE = ICMP_SLE + 1
766 };
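 /// [Editor's illustrative note] The U/L/G/E columns above are literal bits:
 /// FCMP_ULT (1100) is FCMP_OLT (0100) with the unordered bit added, and
 /// inverting an FCMP predicate flips all four bits, e.g. FCMP_OEQ (0001)
 /// inverts to FCMP_UNE (1110).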
767
768protected:
769 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
770 Value *LHS, Value *RHS, const Twine &Name = "",
771 Instruction *InsertBefore = nullptr,
772 Instruction *FlagsSource = nullptr);
773
774 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
775 Value *LHS, Value *RHS, const Twine &Name,
776 BasicBlock *InsertAtEnd);
777
778public:
779 // allocate space for exactly two operands
780 void *operator new(size_t s) {
781 return User::operator new(s, 2);
782 }
783
784 /// Construct a compare instruction, given the opcode, the predicate and
785 /// the two operands. Optionally (if InstBefore is specified) insert the
786 /// instruction into a BasicBlock right before the specified instruction.
787 /// The specified Instruction is allowed to be a dereferenced end iterator.
788 /// Create a CmpInst
789 static CmpInst *Create(OtherOps Op,
790 Predicate predicate, Value *S1,
791 Value *S2, const Twine &Name = "",
792 Instruction *InsertBefore = nullptr);
793
794 /// Construct a compare instruction, given the opcode, the predicate and the
795 /// two operands. Also automatically insert this instruction to the end of
796 /// the BasicBlock specified.
797 /// Create a CmpInst
798 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
799 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
800
801 /// Get the opcode casted to the right type
802 OtherOps getOpcode() const {
803 return static_cast<OtherOps>(Instruction::getOpcode());
804 }
805
806 /// Return the predicate for this instruction.
807 Predicate getPredicate() const {
808 return Predicate(getSubclassDataFromInstruction());
809 }
810
811 /// Set the predicate for this instruction to the specified value.
812 void setPredicate(Predicate P) { setInstructionSubclassData(P); }
813
814 static bool isFPPredicate(Predicate P) {
815 return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
816 }
817
818 static bool isIntPredicate(Predicate P) {
819 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
820 }
821
822 static StringRef getPredicateName(Predicate P);
823
824 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
825 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
826
827 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
828 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
829 /// @returns the inverse predicate for the instruction's current predicate.
830 /// Return the inverse of the instruction's predicate.
831 Predicate getInversePredicate() const {
832 return getInversePredicate(getPredicate());
833 }
834
835 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
836 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
837 /// @returns the inverse predicate for predicate provided in \p pred.
838 /// Return the inverse of a given predicate
839 static Predicate getInversePredicate(Predicate pred);
840
841 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
842 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
843 /// @returns the predicate that would be the result of exchanging the two
844 /// operands of the CmpInst instruction without changing the result
845 /// produced.
846 /// Return the predicate as if the operands were swapped
847 Predicate getSwappedPredicate() const {
848 return getSwappedPredicate(getPredicate());
849 }
850
851 /// This is a static version that you can use without an instruction
852 /// available.
853 /// Return the predicate as if the operands were swapped.
854 static Predicate getSwappedPredicate(Predicate pred);
855
856 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
857 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
858 /// Does not support other kinds of predicates.
859 /// @returns the predicate with the "or equal to 0" part flipped: added if
860 /// the predicate lacked it, removed if it had it.
861 /// Return the flipped strictness of predicate
862 Predicate getFlippedStrictnessPredicate() const {
863 return getFlippedStrictnessPredicate(getPredicate());
864 }
865
866 /// This is a static version that you can use without an instruction
867 /// available.
868 /// Return the flipped strictness of predicate
869 static Predicate getFlippedStrictnessPredicate(Predicate pred);
870
871 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
872 /// Returns the non-strict version of strict comparisons.
873 Predicate getNonStrictPredicate() const {
874 return getNonStrictPredicate(getPredicate());
875 }
876
877 /// This is a static version that you can use without an instruction
878 /// available.
879 /// @returns the non-strict version of comparison provided in \p pred.
880 /// If \p pred is not a strict comparison predicate, returns \p pred.
881 /// Returns the non-strict version of strict comparisons.
882 static Predicate getNonStrictPredicate(Predicate pred);
883
884 /// Provide more efficient getOperand methods.
885 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
886
887 /// This is just a convenience that dispatches to the subclasses.
888 /// Swap the operands and adjust predicate accordingly to retain
889 /// the same comparison.
890 void swapOperands();
891
892 /// This is just a convenience that dispatches to the subclasses.
893 /// Determine if this CmpInst is commutative.
894 bool isCommutative() const;
895
896 /// This is just a convenience that dispatches to the subclasses.
897 /// Determine if this is an equals/not equals predicate.
898 bool isEquality() const;
899
900 /// @returns true if the comparison is signed, false otherwise.
901 /// Determine if this instruction is using a signed comparison.
902 bool isSigned() const {
903 return isSigned(getPredicate());
904 }
905
906 /// @returns true if the comparison is unsigned, false otherwise.
907 /// Determine if this instruction is using an unsigned comparison.
908 bool isUnsigned() const {
909 return isUnsigned(getPredicate());
910 }
911
912 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE. Passing an
913 /// already-signed predicate such as SLT fails an assert.
914 /// @returns the signed version of the unsigned predicate \p pred.
915 static Predicate getSignedPredicate(Predicate pred);
916
917 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE. An already-signed
918 /// predicate fails an assert.
919 /// @returns the signed version of the predicate for this instruction (which
920 /// has to be an unsigned predicate).
921 Predicate getSignedPredicate() {
922 return getSignedPredicate(getPredicate());
923 }
924
925 /// This is just a convenience.
926 /// Determine if this is true when both operands are the same.
927 bool isTrueWhenEqual() const {
928 return isTrueWhenEqual(getPredicate());
929 }
930
931 /// This is just a convenience.
932 /// Determine if this is false when both operands are the same.
933 bool isFalseWhenEqual() const {
934 return isFalseWhenEqual(getPredicate());
935 }
936
937 /// @returns true if the predicate is unsigned, false otherwise.
938 /// Determine if the predicate is an unsigned operation.
939 static bool isUnsigned(Predicate predicate);
940
941 /// @returns true if the predicate is signed, false otherwise.
942 /// Determine if the predicate is a signed operation.
943 static bool isSigned(Predicate predicate);
944
945 /// Determine if the predicate is an ordered operation.
946 static bool isOrdered(Predicate predicate);
947
948 /// Determine if the predicate is an unordered operation.
949 static bool isUnordered(Predicate predicate);
950
951 /// Determine if the predicate is true when comparing a value with itself.
952 static bool isTrueWhenEqual(Predicate predicate);
953
954 /// Determine if the predicate is false when comparing a value with itself.
955 static bool isFalseWhenEqual(Predicate predicate);
956
957 /// Determine if Pred1 implies Pred2 is true when two compares have matching
958 /// operands.
959 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
960
961 /// Determine if Pred1 implies Pred2 is false when two compares have matching
962 /// operands.
963 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
964
965 /// Methods for support type inquiry through isa, cast, and dyn_cast:
966 static bool classof(const Instruction *I) {
967 return I->getOpcode() == Instruction::ICmp ||
968 I->getOpcode() == Instruction::FCmp;
969 }
970 static bool classof(const Value *V) {
971 return isa<Instruction>(V) && classof(cast<Instruction>(V));
972 }
973
974 /// Create a result type for fcmp/icmp
975 static Type* makeCmpResultType(Type* opnd_type) {
976 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
977 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
978 vt->getElementCount());
979 }
980 return Type::getInt1Ty(opnd_type->getContext());
981 }
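  /// Editorial sketch (not part of the original header): the result type
  /// mirrors the operand's shape, assuming a hypothetical LLVMContext &Ctx:
  /// \code
  ///   makeCmpResultType(Type::getInt32Ty(Ctx));                      // i1
  ///   makeCmpResultType(VectorType::get(Type::getInt32Ty(Ctx), 4));  // <4 x i1>
  /// \endcode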
982
983private:
984 // Shadow Value::setValueSubclassData with a private forwarding method so that
985 // subclasses cannot accidentally use it.
986 void setValueSubclassData(unsigned short D) {
987 Value::setValueSubclassData(D);
988 }
989};
990
991// FIXME: these are redundant if CmpInst < BinaryOperator
992template <>
993struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
994};
995
996DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
997
998/// A lightweight accessor for an operand bundle meant to be passed
999/// around by value.
1000struct OperandBundleUse {
1001 ArrayRef<Use> Inputs;
1002
1003 OperandBundleUse() = default;
1004 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1005 : Inputs(Inputs), Tag(Tag) {}
1006
1007 /// Return true if the operand at index \p Idx in this operand bundle
1008 /// has the attribute A.
1009 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1010 if (isDeoptOperandBundle())
1011 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1012 return Inputs[Idx]->getType()->isPointerTy();
1013
1014 // Conservative answer: no operands have any attributes.
1015 return false;
1016 }
1017
1018 /// Return the tag of this operand bundle as a string.
1019 StringRef getTagName() const {
1020 return Tag->getKey();
1021 }
1022
1023 /// Return the tag of this operand bundle as an integer.
1024 ///
1025 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1026 /// and this function returns the unique integer that getOrInsertBundleTag
1027 /// associated with the tag of this operand bundle.
1028 uint32_t getTagID() const {
1029 return Tag->getValue();
1030 }
1031
1032 /// Return true if this is a "deopt" operand bundle.
1033 bool isDeoptOperandBundle() const {
1034 return getTagID() == LLVMContext::OB_deopt;
1035 }
1036
1037 /// Return true if this is a "funclet" operand bundle.
1038 bool isFuncletOperandBundle() const {
1039 return getTagID() == LLVMContext::OB_funclet;
1040 }
1041
1042 /// Return true if this is a "cfguardtarget" operand bundle.
1043 bool isCFGuardTargetOperandBundle() const {
1044 return getTagID() == LLVMContext::OB_cfguardtarget;
1045 }
1046
1047private:
1048 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1049 StringMapEntry<uint32_t> *Tag;
1050};
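/// Editorial sketch (not part of the original header): OperandBundleUse
/// values are normally obtained from a call site rather than constructed
/// directly; CB is a hypothetical CallBase reference and visitDeoptState a
/// hypothetical visitor:
/// \code
///   for (unsigned i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
///     OperandBundleUse OBU = CB.getOperandBundleAt(i);
///     if (OBU.isDeoptOperandBundle())
///       visitDeoptState(OBU.Inputs);
///   }
/// \endcode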
1051
1052/// A container for an operand bundle being viewed as a set of values
1053/// rather than a set of uses.
1054///
1055/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1056/// so it is possible to create and pass around "self-contained" instances of
1057/// OperandBundleDef and ConstOperandBundleDef.
1058template <typename InputTy> class OperandBundleDefT {
1059 std::string Tag;
1060 std::vector<InputTy> Inputs;
1061
1062public:
1063 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1064 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1065 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1066 : Tag(std::move(Tag)), Inputs(Inputs) {}
1067
1068 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1069 Tag = OBU.getTagName();
1070 Inputs.insert(Inputs.end(), OBU.Inputs.begin(), OBU.Inputs.end());
1071 }
1072
1073 ArrayRef<InputTy> inputs() const { return Inputs; }
1074
1075 using input_iterator = typename std::vector<InputTy>::const_iterator;
1076
1077 size_t input_size() const { return Inputs.size(); }
1078 input_iterator input_begin() const { return Inputs.begin(); }
1079 input_iterator input_end() const { return Inputs.end(); }
1080
1081 StringRef getTag() const { return Tag; }
1082};
1083
1084using OperandBundleDef = OperandBundleDefT<Value *>;
1085using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
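/// Editorial sketch (not part of the original header): building a
/// self-contained bundle description and attaching it at call-creation time;
/// Live0, Live1, Callee, and Args are hypothetical values:
/// \code
///   std::vector<Value *> DeoptState = {Live0, Live1};
///   OperandBundleDef DeoptBundle("deopt", DeoptState);
///   CallInst *CI = CallInst::Create(Callee, Args, {DeoptBundle});
/// \endcode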
1086
1087//===----------------------------------------------------------------------===//
1088// CallBase Class
1089//===----------------------------------------------------------------------===//
1090
1091 /// Base class for all callable instructions (CallInst, InvokeInst, and CallBrInst).
1092/// Holds everything related to calling a function.
1093///
1094/// All call-like instructions are required to use a common operand layout:
1095/// - Zero or more arguments to the call,
1096 /// - Zero or more operand bundles, each with zero or more operand
1097 /// inputs,
1098/// - Zero or more subclass controlled operands
1099/// - The called function.
1100///
1101/// This allows this base class to easily access the called function and the
1102/// start of the arguments without knowing how many other operands a particular
1103/// subclass requires. Note that accessing the end of the argument list isn't
1104/// as cheap as most other operations on the base class.
1105class CallBase : public Instruction {
1106protected:
1107 /// The last operand is the called operand.
1108 static constexpr int CalledOperandOpEndIdx = -1;
1109
1110 AttributeList Attrs; ///< parameter attributes for callable
1111 FunctionType *FTy;
1112
1113 template <class... ArgsTy>
1114 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1115 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1116
1117 using Instruction::Instruction;
1118
1119 bool hasDescriptor() const { return Value::HasDescriptor; }
1120
1121 unsigned getNumSubclassExtraOperands() const {
1122 switch (getOpcode()) {
1123 case Instruction::Call:
1124 return 0;
1125 case Instruction::Invoke:
1126 return 2;
1127 case Instruction::CallBr:
1128 return getNumSubclassExtraOperandsDynamic();
1129 }
1130 llvm_unreachable("Invalid opcode!");
1131 }
1132
1133 /// Get the number of extra operands for instructions that don't have a fixed
1134 /// number of extra operands.
1135 unsigned getNumSubclassExtraOperandsDynamic() const;
1136
1137public:
1138 using Instruction::getContext;
1139
1140 static bool classof(const Instruction *I) {
1141 return I->getOpcode() == Instruction::Call ||
1142 I->getOpcode() == Instruction::Invoke ||
1143 I->getOpcode() == Instruction::CallBr;
1144 }
1145 static bool classof(const Value *V) {
1146 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1147 }
1148
1149 FunctionType *getFunctionType() const { return FTy; }
1150
1151 void mutateFunctionType(FunctionType *FTy) {
1152 Value::mutateType(FTy->getReturnType());
1153 this->FTy = FTy;
1154 }
1155
1156 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1157
1158 /// data_operands_begin/data_operands_end - Return iterators iterating over
1159 /// the call / invoke argument list and bundle operands. For invokes, this is
1160 /// the set of instruction operands except the invoke target and the two
1161 /// successor blocks; and for calls this is the set of instruction operands
1162 /// except the call target.
1163 User::op_iterator data_operands_begin() { return op_begin(); }
1164 User::const_op_iterator data_operands_begin() const {
1165 return const_cast<CallBase *>(this)->data_operands_begin();
1166 }
1167 User::op_iterator data_operands_end() {
1168 // Walk from the end of the operands over the called operand and any
1169 // subclass operands.
1170 return op_end() - getNumSubclassExtraOperands() - 1;
1171 }
1172 User::const_op_iterator data_operands_end() const {
1173 return const_cast<CallBase *>(this)->data_operands_end();
1174 }
1175 iterator_range<User::op_iterator> data_ops() {
1176 return make_range(data_operands_begin(), data_operands_end());
1177 }
1178 iterator_range<User::const_op_iterator> data_ops() const {
1179 return make_range(data_operands_begin(), data_operands_end());
1180 }
1181 bool data_operands_empty() const {
1182 return data_operands_end() == data_operands_begin();
1183 }
1184 unsigned data_operands_size() const {
1185 return std::distance(data_operands_begin(), data_operands_end());
1186 }
1187
1188 bool isDataOperand(const Use *U) const {
1189 assert(this == U->getUser() &&
1190 "Only valid to query with a use of this instruction!");
1191 return data_operands_begin() <= U && U < data_operands_end();
1192 }
1193 bool isDataOperand(Value::const_user_iterator UI) const {
1194 return isDataOperand(&UI.getUse());
1195 }
1196
1197 /// Given a value use iterator, return the data operand corresponding to it.
1198 /// Iterator must actually correspond to a data operand.
1199 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1200 return getDataOperandNo(&UI.getUse());
1201 }
1202
1203 /// Given a use for a data operand, get the data operand number that
1204 /// corresponds to it.
1205 unsigned getDataOperandNo(const Use *U) const {
1206 assert(isDataOperand(U) && "Data operand # out of range!");
1207 return U - data_operands_begin();
1208 }
1209
1210 /// Return the iterator pointing to the beginning of the argument list.
1211 User::op_iterator arg_begin() { return op_begin(); }
1212 User::const_op_iterator arg_begin() const {
1213 return const_cast<CallBase *>(this)->arg_begin();
1214 }
1215
1216 /// Return the iterator pointing to the end of the argument list.
1217 User::op_iterator arg_end() {
1218 // From the end of the data operands, walk backwards past the bundle
1219 // operands.
1220 return data_operands_end() - getNumTotalBundleOperands();
1221 }
1222 User::const_op_iterator arg_end() const {
1223 return const_cast<CallBase *>(this)->arg_end();
1224 }
1225
1226 /// Iteration adapter for range-for loops.
1227 iterator_range<User::op_iterator> args() {
1228 return make_range(arg_begin(), arg_end());
1229 }
1230 iterator_range<User::const_op_iterator> args() const {
1231 return make_range(arg_begin(), arg_end());
1232 }
1233 bool arg_empty() const { return arg_end() == arg_begin(); }
1234 unsigned arg_size() const { return arg_end() - arg_begin(); }
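  /// Editorial sketch (not part of the original header): visiting a call's
  /// arguments with the range adaptor above, for a hypothetical CallBase &CB;
  /// bundle operands and the callee are not visited. NumPointerArgs is a
  /// hypothetical counter:
  /// \code
  ///   for (Value *Arg : CB.args())
  ///     if (Arg->getType()->isPointerTy())
  ///       ++NumPointerArgs;
  /// \endcode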
1235
1236 // Legacy API names that duplicate the above and will be removed once users
1237 // are migrated.
1238 iterator_range<User::op_iterator> arg_operands() {
1239 return make_range(arg_begin(), arg_end());
1240 }
1241 iterator_range<User::const_op_iterator> arg_operands() const {
1242 return make_range(arg_begin(), arg_end());
1243 }
1244 unsigned getNumArgOperands() const { return arg_size(); }
1245
1246 Value *getArgOperand(unsigned i) const {
1247 assert(i < getNumArgOperands() && "Out of bounds!");
1248 return getOperand(i);
1249 }
1250
1251 void setArgOperand(unsigned i, Value *v) {
1252 assert(i < getNumArgOperands() && "Out of bounds!");
1253 setOperand(i, v);
1254 }
1255
1256 /// Wrappers for getting the \c Use of a call argument.
1257 const Use &getArgOperandUse(unsigned i) const {
1258 assert(i < getNumArgOperands() && "Out of bounds!");
1259 return User::getOperandUse(i);
1260 }
1261 Use &getArgOperandUse(unsigned i) {
1262 assert(i < getNumArgOperands() && "Out of bounds!");
1263 return User::getOperandUse(i);
1264 }
1265
1266 bool isArgOperand(const Use *U) const {
1267 assert(this == U->getUser() &&
1268 "Only valid to query with a use of this instruction!");
1269 return arg_begin() <= U && U < arg_end();
1270 }
1271 bool isArgOperand(Value::const_user_iterator UI) const {
1272 return isArgOperand(&UI.getUse());
1273 }
1274
1275 /// Given a use for an arg operand, get the arg operand number that
1276 /// corresponds to it.
1277 unsigned getArgOperandNo(const Use *U) const {
1278 assert(isArgOperand(U) && "Arg operand # out of range!");
1279 return U - arg_begin();
1280 }
1281
1282 /// Given a value use iterator, return the arg operand number corresponding
1283 /// to it. The iterator must actually correspond to an argument operand.
1284 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1285 return getArgOperandNo(&UI.getUse());
1286 }
1287
1288 /// Returns true if this CallSite passes the given Value* as an argument to
1289 /// the called function.
1290 bool hasArgument(const Value *V) const {
1291 return llvm::any_of(args(), [V](const Value *Arg) { return Arg == V; });
1292 }
1293
1294 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1295
1296 // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in
1297 // the near future.
1298 Value *getCalledValue() const { return getCalledOperand(); }
1299
1300 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1301 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1302
1303 /// Returns the function called, or null if this is an
1304 /// indirect function invocation.
1305 Function *getCalledFunction() const {
1306 return dyn_cast_or_null<Function>(getCalledOperand());
21. Assuming null pointer is passed into cast
22. Returning null pointer, which participates in a condition later
1307 }
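  /// Editorial note (not part of the original header): because this returns
  /// null for indirect call sites, callers must test the result; the analyzer
  /// events 21-22 above track exactly such a null value flowing into a later
  /// condition. A minimal guarded use, for a hypothetical CallBase *CB:
  /// \code
  ///   if (Function *Callee = CB->getCalledFunction())
  ///     processDirect(*Callee);    // processDirect is hypothetical
  ///   else
  ///     processIndirect(*CB);      // the called operand is not a Function
  /// \endcode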
1308
1309 /// Return true if the callsite is an indirect call.
1310 bool isIndirectCall() const;
1311
1312 /// Determine whether the passed iterator points to the callee operand's Use.
1313 bool isCallee(Value::const_user_iterator UI) const {
1314 return isCallee(&UI.getUse());
1315 }
1316
1317 /// Determine whether this Use is the callee operand's Use.
1318 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1319
1320 /// Helper to get the caller (the parent function).
1321 Function *getCaller();
1322 const Function *getCaller() const {
1323 return const_cast<CallBase *>(this)->getCaller();
1324 }
1325
1326 /// Tests if this call site must be tail call optimized. Only a CallInst can
1327 /// be tail call optimized.
1328 bool isMustTailCall() const;
1329
1330 /// Tests if this call site is marked as a tail call.
1331 bool isTailCall() const;
1332
1333 /// Returns the intrinsic ID of the intrinsic called or
1334 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1335 /// this is an indirect call.
1336 Intrinsic::ID getIntrinsicID() const;
1337
1338 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1339
1340 /// Sets the function called, including updating the function type.
1341 void setCalledFunction(Function *Fn) {
1342 setCalledFunction(Fn->getFunctionType(), Fn);
1343 }
1344
1345 /// Sets the function called, including updating the function type.
1346 void setCalledFunction(FunctionCallee Fn) {
1347 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1348 }
1349
1350 /// Sets the function called, including updating to the specified function
1351 /// type.
1352 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1353 this->FTy = FTy;
1354 assert(FTy == cast<FunctionType>(
1355 cast<PointerType>(Fn->getType())->getElementType()));
1356 // This function doesn't mutate the return type, only the function
1357 // type. Seems broken, but I'm just gonna stick an assert in for now.
1358 assert(getType() == FTy->getReturnType());
1359 setCalledOperand(Fn);
1360 }
1361
1362 CallingConv::ID getCallingConv() const {
1363 return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
1364 }
1365
1366 void setCallingConv(CallingConv::ID CC) {
1367 auto ID = static_cast<unsigned>(CC);
1368 assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
1369 setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
1370 (ID << 2));
1371 }
1372
1373 /// Check if this call is an inline asm statement.
1374 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1375
1376 /// \name Attribute API
1377 ///
1378 /// These methods access and modify attributes on this call (including
1379 /// looking through to the attributes on the called function when necessary).
1380 ///@{
1381
1382 /// Return the parameter attributes for this call.
1383 ///
1384 AttributeList getAttributes() const { return Attrs; }
1385
1386 /// Set the parameter attributes for this call.
1387 ///
1388 void setAttributes(AttributeList A) { Attrs = A; }
1389
1390 /// Determine whether this call has the given attribute.
1391 bool hasFnAttr(Attribute::AttrKind Kind) const {
1392 assert(Kind != Attribute::NoBuiltin &&
1393 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
1394 return hasFnAttrImpl(Kind);
1395 }
1396
1397 /// Determine whether this call has the given attribute.
1398 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1399
1400 /// Adds the attribute to the list of attributes.
1401 void addAttribute(unsigned i, Attribute::AttrKind Kind) {
1402 AttributeList PAL = getAttributes();
1403 PAL = PAL.addAttribute(getContext(), i, Kind);
1404 setAttributes(PAL);
1405 }
1406
1407 /// Adds the attribute to the list of attributes.
1408 void addAttribute(unsigned i, Attribute Attr) {
1409 AttributeList PAL = getAttributes();
1410 PAL = PAL.addAttribute(getContext(), i, Attr);
1411 setAttributes(PAL);
1412 }
1413
1414 /// Adds the attribute to the indicated argument
1415 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1416 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1417 AttributeList PAL = getAttributes();
1418 PAL = PAL.addParamAttribute(getContext(), ArgNo, Kind);
1419 setAttributes(PAL);
1420 }
1421
1422 /// Adds the attribute to the indicated argument
1423 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1424 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1425 AttributeList PAL = getAttributes();
1426 PAL = PAL.addParamAttribute(getContext(), ArgNo, Attr);
1427 setAttributes(PAL);
1428 }
1429
1430 /// Removes the attribute from the list of attributes.
1431 void removeAttribute(unsigned i, Attribute::AttrKind Kind) {
1432 AttributeList PAL = getAttributes();
1433 PAL = PAL.removeAttribute(getContext(), i, Kind);
1434 setAttributes(PAL);
1435 }
1436
1437 /// Removes the attribute from the list of attributes.
1438 void removeAttribute(unsigned i, StringRef Kind) {
1439 AttributeList PAL = getAttributes();
1440 PAL = PAL.removeAttribute(getContext(), i, Kind);
1441 setAttributes(PAL);
1442 }
1443
1444 /// Removes the attribute from the given argument
1445 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1446 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1447 AttributeList PAL = getAttributes();
1448 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1449 setAttributes(PAL);
1450 }
1451
1452 /// Removes the attribute from the given argument
1453 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1454 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1455 AttributeList PAL = getAttributes();
1456 PAL = PAL.removeParamAttribute(getContext(), ArgNo, Kind);
1457 setAttributes(PAL);
1458 }
1459
1460 /// Adds the dereferenceable attribute to the list of attributes.
1461 void addDereferenceableAttr(unsigned i, uint64_t Bytes) {
1462 AttributeList PAL = getAttributes();
1463 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
1464 setAttributes(PAL);
1465 }
1466
1467 /// Adds the dereferenceable_or_null attribute to the list of
1468 /// attributes.
1469 void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
1470 AttributeList PAL = getAttributes();
1471 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
1472 setAttributes(PAL);
1473 }
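  /// Editorial sketch (not part of the original header): the mutators above
  /// always round-trip through a fresh AttributeList, so updates are
  /// copy-on-write; for a hypothetical CallBase &CB:
  /// \code
  ///   CB.addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
  ///   CB.addParamAttr(0, Attribute::NoCapture);     // first argument
  ///   CB.removeParamAttr(0, Attribute::NoCapture);  // and back again
  /// \endcode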
1474
1475 /// Determine whether the return value has the given attribute.
1476 bool hasRetAttr(Attribute::AttrKind Kind) const;
1477
1478 /// Determine whether the argument or parameter has the given attribute.
1479 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1480
1481 /// Get the attribute of a given kind at a position.
1482 Attribute getAttribute(unsigned i, Attribute::AttrKind Kind) const {
1483 return getAttributes().getAttribute(i, Kind);
1484 }
1485
1486 /// Get the attribute of a given kind at a position.
1487 Attribute getAttribute(unsigned i, StringRef Kind) const {
1488 return getAttributes().getAttribute(i, Kind);
1489 }
1490
1491 /// Get the attribute of a given kind from a given arg
1492 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1493 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1494 return getAttributes().getParamAttr(ArgNo, Kind);
1495 }
1496
1497 /// Get the attribute of a given kind from a given arg
1498 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1499 assert(ArgNo < getNumArgOperands() && "Out of bounds");
1500 return getAttributes().getParamAttr(ArgNo, Kind);
1501 }
1502
1503 /// Return true if the data operand at index \p i has the attribute \p
1504 /// A.
1505 ///
1506 /// Data operands include call arguments and values used in operand bundles,
1507 /// but does not include the callee operand. This routine dispatches to the
1508 /// underlying AttributeList or the OperandBundleUser as appropriate.
1509 ///
1510 /// The index \p i is interpreted as
1511 ///
1512 /// \p i == Attribute::ReturnIndex -> the return value
1513 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1514 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1515 /// (\p i - 1) in the operand list.
1516 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1517 // Note that we have to add one because `i` isn't zero-indexed.
1518 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&
1519 "Data operand index out of bounds!");
1520
1521 // The attribute A can either be directly specified, if the operand in
1522 // question is a call argument; or be indirectly implied by the kind of its
1523 // containing operand bundle, if the operand is a bundle operand.
1524
1525 if (i == AttributeList::ReturnIndex)
1526 return hasRetAttr(Kind);
1527
1528 // FIXME: Avoid these i - 1 calculations and update the API to use
1529 // zero-based indices.
1530 if (i < (getNumArgOperands() + 1))
1531 return paramHasAttr(i - 1, Kind);
1532
1533 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
1534 "Must be either a call argument or an operand bundle!");
1535 return bundleOperandHasAttr(i - 1, Kind);
1536 }
1537
1538 /// Determine whether this data operand is not captured.
1539 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1540 // better indicate that this may return a conservative answer.
1541 bool doesNotCapture(unsigned OpNo) const {
1542 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1543 }
1544
1545 /// Determine whether this argument is passed by value.
1546 bool isByValArgument(unsigned ArgNo) const {
1547 return paramHasAttr(ArgNo, Attribute::ByVal);
1548 }
1549
1550 /// Determine whether this argument is passed in an alloca.
1551 bool isInAllocaArgument(unsigned ArgNo) const {
1552 return paramHasAttr(ArgNo, Attribute::InAlloca);
1553 }
1554
1555 /// Determine whether this argument is passed by value or in an alloca.
1556 bool isByValOrInAllocaArgument(unsigned ArgNo) const {
1557 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1558 paramHasAttr(ArgNo, Attribute::InAlloca);
1559 }
1560
1561 /// Determine if there is an inalloca argument. Only the last argument can
1562 /// have the inalloca attribute.
1563 bool hasInAllocaArgument() const {
1564 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1565 }
1566
1567 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1568 // better indicate that this may return a conservative answer.
1569 bool doesNotAccessMemory(unsigned OpNo) const {
1570 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1571 }
1572
1573 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1574 // better indicate that this may return a conservative answer.
1575 bool onlyReadsMemory(unsigned OpNo) const {
1576 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1577 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1578 }
1579
1580 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1581 // better indicate that this may return a conservative answer.
1582 bool doesNotReadMemory(unsigned OpNo) const {
1583 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1584 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1585 }
1586
1587 /// Extract the alignment of the return value.
1588 /// FIXME: Remove this function once transition to Align is over.
1589 /// Use getRetAlign() instead.
1590 unsigned getRetAlignment() const {
1591 if (const auto MA = Attrs.getRetAlignment())
1592 return MA->value();
1593 return 0;
1594 }
1595
1596 /// Extract the alignment of the return value.
1597 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1598
1599 /// Extract the alignment for a call or parameter (0=unknown).
1600 /// FIXME: Remove this function once transition to Align is over.
1601 /// Use getParamAlign() instead.
1602 unsigned getParamAlignment(unsigned ArgNo) const {
1603 if (const auto MA = Attrs.getParamAlignment(ArgNo))
1604 return MA->value();
1605 return 0;
1606 }
1607
1608 /// Extract the alignment for a call or parameter (0=unknown).
1609 MaybeAlign getParamAlign(unsigned ArgNo) const {
1610 return Attrs.getParamAlignment(ArgNo);
1611 }
1612
1613 /// Extract the byval type for a call or parameter.
1614 Type *getParamByValType(unsigned ArgNo) const {
1615 Type *Ty = Attrs.getParamByValType(ArgNo);
1616 return Ty ? Ty : getArgOperand(ArgNo)->getType()->getPointerElementType();
1617 }
1618
1619 /// Extract the number of dereferenceable bytes for a call or
1620 /// parameter (0=unknown).
1621 uint64_t getDereferenceableBytes(unsigned i) const {
1622 return Attrs.getDereferenceableBytes(i);
1623 }
1624
1625 /// Extract the number of dereferenceable_or_null bytes for a call or
1626 /// parameter (0=unknown).
1627 uint64_t getDereferenceableOrNullBytes(unsigned i) const {
1628 return Attrs.getDereferenceableOrNullBytes(i);
1629 }
1630
1631 /// Return true if the return value is known to be not null.
1632 /// This may be because it has the nonnull attribute, or because at least
1633 /// one byte is dereferenceable and the pointer is in addrspace(0).
1634 bool isReturnNonNull() const;
1635
1636 /// Determine if the return value is marked with NoAlias attribute.
1637 bool returnDoesNotAlias() const {
1638 return Attrs.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1639 }
1640
1641 /// If one of the arguments has the 'returned' attribute, returns its
1642 /// operand value. Otherwise, return nullptr.
1643 Value *getReturnedArgOperand() const;
1644
1645 /// Return true if the call should not be treated as a call to a
1646 /// builtin.
1647 bool isNoBuiltin() const {
1648 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1649 !hasFnAttrImpl(Attribute::Builtin);
1650 }
1651
1652 /// Determine if the call requires strict floating point semantics.
1653 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1654
1655 /// Return true if the call should not be inlined.
1656 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1657 void setIsNoInline() {
1658 addAttribute(AttributeList::FunctionIndex, Attribute::NoInline);
1659 }
1660 /// Determine if the call does not access memory.
1661 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1662 void setDoesNotAccessMemory() {
1663 addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
1664 }
1665
1666 /// Determine if the call does not access or only reads memory.
1667 bool onlyReadsMemory() const {
1668 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1669 }
1670 void setOnlyReadsMemory() {
1671 addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
1672 }
1673
1674 /// Determine if the call does not access or only writes memory.
1675 bool doesNotReadMemory() const {
1676 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1677 }
1678 void setDoesNotReadMemory() {
1679 addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
1680 }
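  /// Editorial sketch (not part of the original header): readnone subsumes
  /// both the read-only and write-only views above; for a hypothetical
  /// CallBase &CB:
  /// \code
  ///   CB.setDoesNotAccessMemory();      // adds Attribute::ReadNone
  ///   assert(CB.onlyReadsMemory());     // readnone implies readonly
  ///   assert(CB.doesNotReadMemory());   // readnone implies writeonly
  /// \endcode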
1681
1682 /// Determine if the call can access memory only using pointers based
1683 /// on its arguments.
1684 bool onlyAccessesArgMemory() const {
1685 return hasFnAttr(Attribute::ArgMemOnly);
1686 }
1687 void setOnlyAccessesArgMemory() {
1688 addAttribute(AttributeList::FunctionIndex, Attribute::ArgMemOnly);
1689 }
1690
1691 /// Determine if the function may only access memory that is
1692 /// inaccessible from the IR.
1693 bool onlyAccessesInaccessibleMemory() const {
1694 return hasFnAttr(Attribute::InaccessibleMemOnly);
1695 }
1696 void setOnlyAccessesInaccessibleMemory() {
1697 addAttribute(AttributeList::FunctionIndex, Attribute::InaccessibleMemOnly);
1698 }
1699
1700 /// Determine if the function may only access memory that is
1701 /// either inaccessible from the IR or pointed to by its arguments.
1702 bool onlyAccessesInaccessibleMemOrArgMem() const {
1703 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1704 }
1705 void setOnlyAccessesInaccessibleMemOrArgMem() {
1706 addAttribute(AttributeList::FunctionIndex,
1707 Attribute::InaccessibleMemOrArgMemOnly);
1708 }
1709 /// Determine if the call cannot return.
1710 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1711 void setDoesNotReturn() {
1712 addAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
1713 }
1714
1715 /// Determine if the call should not perform indirect branch tracking.
1716 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1717
1718 /// Determine if the call cannot unwind.
1719 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1720 void setDoesNotThrow() {
1721 addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
1722 }
1723
1724 /// Determine if the invoke cannot be duplicated.
1725 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1726 void setCannotDuplicate() {
1727 addAttribute(AttributeList::FunctionIndex, Attribute::NoDuplicate);
1728 }
1729
1730 /// Determine if the invoke is convergent
1731 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
1732 void setConvergent() {
1733 addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1734 }
1735 void setNotConvergent() {
1736 removeAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
1737 }
1738
1739 /// Determine if the call returns a structure through its first
1740 /// pointer argument.
1741 bool hasStructRetAttr() const {
1742 if (getNumArgOperands() == 0)
1743 return false;
1744
1745 // Be friendly and also check the callee.
1746 return paramHasAttr(0, Attribute::StructRet);
1747 }
1748
1749 /// Determine if any call argument is an aggregate passed by value.
1750 bool hasByValArgument() const {
1751 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1752 }
1753
1754 ///@}
1755 // End of attribute API.
1756
1757 /// \name Operand Bundle API
1758 ///
1759 /// This group of methods provides the API to access and manipulate operand
1760 /// bundles on this call.
1761 /// @{
1762
1763 /// Return the number of operand bundles associated with this User.
1764 unsigned getNumOperandBundles() const {
1765 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1766 }
1767
1768 /// Return true if this User has any operand bundles.
1769 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1770
1771 /// Return the index of the first bundle operand in the Use array.
1772 unsigned getBundleOperandsStartIndex() const {
1773 assert(hasOperandBundles() && "Don't call otherwise!");
1774 return bundle_op_info_begin()->Begin;
1775 }
1776
1777 /// Return the index of the last bundle operand in the Use array.
1778 unsigned getBundleOperandsEndIndex() const {
1779 assert(hasOperandBundles() && "Don't call otherwise!");
1780 return bundle_op_info_end()[-1].End;
1781 }
1782
1783 /// Return true if the operand at index \p Idx is a bundle operand.
1784 bool isBundleOperand(unsigned Idx) const {
1785 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1786 Idx < getBundleOperandsEndIndex();
1787 }
1788
1789 /// Returns true if the use is a bundle operand.
1790 bool isBundleOperand(const Use *U) const {
1791 assert(this == U->getUser() &&
1792 "Only valid to query with a use of this instruction!");
1793 return hasOperandBundles() && isBundleOperand(U - op_begin());
1794 }
1795 bool isBundleOperand(Value::const_user_iterator UI) const {
1796 return isBundleOperand(&UI.getUse());
1797 }
1798
1799 /// Return the total number of operands (not operand bundles) used by
1800 /// every operand bundle in this OperandBundleUser.
1801 unsigned getNumTotalBundleOperands() const {
1802 if (!hasOperandBundles())
1803 return 0;
1804
1805 unsigned Begin = getBundleOperandsStartIndex();
1806 unsigned End = getBundleOperandsEndIndex();
1807
1808 assert(Begin <= End && "Should be!");
1809 return End - Begin;
1810 }
1811
1812 /// Return the operand bundle at a specific index.
1813 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1814 assert(Index < getNumOperandBundles() && "Index out of bounds!");
1815 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1816 }
1817
1818 /// Return the number of operand bundles with the tag Name attached to
1819 /// this instruction.
1820 unsigned countOperandBundlesOfType(StringRef Name) const {
1821 unsigned Count = 0;
1822 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1823 if (getOperandBundleAt(i).getTagName() == Name)
1824 Count++;
1825
1826 return Count;
1827 }
1828
1829 /// Return the number of operand bundles with the tag ID attached to
1830 /// this instruction.
1831 unsigned countOperandBundlesOfType(uint32_t ID) const {
1832 unsigned Count = 0;
1833 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1834 if (getOperandBundleAt(i).getTagID() == ID)
1835 Count++;
1836
1837 return Count;
1838 }
1839
1840 /// Return an operand bundle by name, if present.
1841 ///
1842 /// It is an error to call this for operand bundle types that may have
1843 /// multiple instances of them on the same instruction.
1844 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1845 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
1846
1847 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1848 OperandBundleUse U = getOperandBundleAt(i);
1849 if (U.getTagName() == Name)
1850 return U;
1851 }
1852
1853 return None;
1854 }
1855
1856 /// Return an operand bundle by tag ID, if present.
1857 ///
1858 /// It is an error to call this for operand bundle types that may have
1859 /// multiple instances of them on the same instruction.
1860 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
1861 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
1862
1863 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1864 OperandBundleUse U = getOperandBundleAt(i);
1865 if (U.getTagID() == ID)
1866 return U;
1867 }
1868
1869 return None;
1870 }
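  /// Editorial sketch (not part of the original header): lookup by interned
  /// tag ID avoids the string comparison of the StringRef form; CB is a
  /// hypothetical CallBase reference and consumeDeoptState a hypothetical
  /// consumer:
  /// \code
  ///   if (Optional<OperandBundleUse> Deopt =
  ///           CB.getOperandBundle(LLVMContext::OB_deopt))
  ///     consumeDeoptState(Deopt->Inputs);
  /// \endcode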
1871
1872 /// Return the list of operand bundles attached to this instruction as
1873 /// a vector of OperandBundleDefs.
1874 ///
1875 /// This function copies the OperandBundleUse instances associated with this
1876 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
1877 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
1878 /// representations of operand bundles (see documentation above).
1879 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
1880 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1881 Defs.emplace_back(getOperandBundleAt(i));
1882 }
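  /// Editorial sketch (not part of the original header): a common use is
  /// cloning a call while preserving its bundles, for a hypothetical
  /// CallInst *CI:
  /// \code
  ///   SmallVector<OperandBundleDef, 1> Bundles;
  ///   CI->getOperandBundlesAsDefs(Bundles);
  ///   CallInst *Clone = CallInst::Create(CI, Bundles, /*InsertPt=*/CI);
  /// \endcode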
1883
1884 /// Return the operand bundle for the operand at index OpIdx.
1885 ///
1886 /// It is an error to call this with an OpIdx that does not correspond to a
1887 /// bundle operand.
1888 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
1889 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
1890 }
1891
1892 /// Return true if this operand bundle user has operand bundles that
1893 /// may read from the heap.
1894 bool hasReadingOperandBundles() const {
1895 // Implementation note: this is a conservative implementation of operand
1896 // bundle semantics, where *any* operand bundle forces a callsite to be at
1897 // least readonly.
1898 return hasOperandBundles();
1899 }
1900
1901 /// Return true if this operand bundle user has operand bundles that
1902 /// may write to the heap.
1903 bool hasClobberingOperandBundles() const {
1904 for (auto &BOI : bundle_op_infos()) {
1905 if (BOI.Tag->second == LLVMContext::OB_deopt ||
1906 BOI.Tag->second == LLVMContext::OB_funclet)
1907 continue;
1908
1909 // This instruction has an operand bundle that is not known to us.
1910 // Assume the worst.
1911 return true;
1912 }
1913
1914 return false;
1915 }
1916
1917 /// Return true if the bundle operand at index \p OpIdx has the
1918 /// attribute \p A.
1919 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
1920 auto &BOI = getBundleOpInfoForOperand(OpIdx);
1921 auto OBU = operandBundleFromBundleOpInfo(BOI);
1922 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
1923 }
1924
1925 /// Return true if \p Other has the same sequence of operand bundle
1926 /// tags with the same number of operands on each one of them as this
1927 /// OperandBundleUser.
1928 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
1929 if (getNumOperandBundles() != Other.getNumOperandBundles())
1930 return false;
1931
1932 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
1933 Other.bundle_op_info_begin());
1934 }
1935
1936 /// Return true if this operand bundle user contains operand bundles
1937 /// with tags other than those specified in \p IDs.
1938 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
1939 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1940 uint32_t ID = getOperandBundleAt(i).getTagID();
1941 if (!is_contained(IDs, ID))
1942 return true;
1943 }
1944 return false;
1945 }
1946
1947 /// Is the function attribute S disallowed by some operand bundle on
1948 /// this operand bundle user?
1949 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
1950 // Operand bundles only possibly disallow readnone, readonly and argmemonly
1951 // attributes. All String attributes are fine.
1952 return false;
1953 }
1954
1955 /// Is the function attribute A disallowed by some operand bundle on
1956 /// this operand bundle user?
1957 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
1958 switch (A) {
1959 default:
1960 return false;
1961
1962 case Attribute::InaccessibleMemOrArgMemOnly:
1963 return hasReadingOperandBundles();
1964
1965 case Attribute::InaccessibleMemOnly:
1966 return hasReadingOperandBundles();
1967
1968 case Attribute::ArgMemOnly:
1969 return hasReadingOperandBundles();
1970
1971 case Attribute::ReadNone:
1972 return hasReadingOperandBundles();
1973
1974 case Attribute::ReadOnly:
1975 return hasClobberingOperandBundles();
1976 }
1977
1978 llvm_unreachable("switch has a default case!");
1980
1981 /// Used to keep track of an operand bundle. See the main comment on
1982 /// OperandBundleUser above.
1983 struct BundleOpInfo {
1984 /// The operand bundle tag, interned by
1985 /// LLVMContextImpl::getOrInsertBundleTag.
1986 StringMapEntry<uint32_t> *Tag;
1987
1988 /// The index in the Use& vector where operands for this operand
1989 /// bundle starts.
1990 uint32_t Begin;
1991
1992 /// The index in the Use& vector where operands for this operand
1993 /// bundle ends.
1994 uint32_t End;
1995
1996 bool operator==(const BundleOpInfo &Other) const {
1997 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
1998 }
1999 };
2000
2001 /// Simple helper function to map a BundleOpInfo to an
2002 /// OperandBundleUse.
2003 OperandBundleUse
2004 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2005 auto begin = op_begin();
2006 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2007 return OperandBundleUse(BOI.Tag, Inputs);
2008 }
2009
2010 using bundle_op_iterator = BundleOpInfo *;
2011 using const_bundle_op_iterator = const BundleOpInfo *;
2012
2013 /// Return the start of the list of BundleOpInfo instances associated
2014 /// with this OperandBundleUser.
2015 ///
2016 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2017 /// to store some meta information about which operands are "normal" operands,
2018 /// and which ones belong to some operand bundle.
2019 ///
2020 /// The layout of an operand bundle user is
2021 ///
2022 /// +-----------uint32_t End-------------------------------------+
2023 /// | |
2024 /// | +--------uint32_t Begin--------------------+ |
2025 /// | | | |
2026 /// ^ ^ v v
2027 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2028 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2029 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2030 /// v v ^ ^
2031 /// | | | |
2032 /// | +--------uint32_t Begin------------+ |
2033 /// | |
2034 /// +-----------uint32_t End-----------------------------+
2035 ///
2036 ///
2037 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2038 /// list. These descriptions are installed and managed by this class, and
2039 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2040 ///
2041 /// DU is an additional descriptor installed by User's 'operator new' to keep
2042 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2043 /// access or modify DU in any way; it is an implementation detail private
2044 /// to User.
2045 ///
2046 /// The regular Use& vector for the User starts at U0. The operand bundle
2047 /// uses are part of the Use& vector, just like normal uses. In the diagram
2048 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2049 /// BundleOpInfo has information about a contiguous set of uses constituting
2050 /// an operand bundle, and the total set of operand bundle uses themselves
2051 /// form a contiguous set of uses (i.e. there are no gaps between uses
2052 /// corresponding to individual operand bundles).
2053 ///
2054 /// This class does not know the location of the set of operand bundle uses
2055 /// within the use list -- that is decided by the User using this class via
2056 /// the BeginIndex argument in populateBundleOperandInfos.
2057 ///
2058 /// Currently operand bundle users with hung-off operands are not supported.
2059 bundle_op_iterator bundle_op_info_begin() {
2060 if (!hasDescriptor())
2061 return nullptr;
2062
2063 uint8_t *BytesBegin = getDescriptor().begin();
2064 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2065 }
2066
2067 /// Return the start of the list of BundleOpInfo instances associated
2068 /// with this OperandBundleUser.
2069 const_bundle_op_iterator bundle_op_info_begin() const {
2070 auto *NonConstThis = const_cast<CallBase *>(this);
2071 return NonConstThis->bundle_op_info_begin();
2072 }
2073
2074 /// Return the end of the list of BundleOpInfo instances associated
2075 /// with this OperandBundleUser.
2076 bundle_op_iterator bundle_op_info_end() {
2077 if (!hasDescriptor())
2078 return nullptr;
2079
2080 uint8_t *BytesEnd = getDescriptor().end();
2081 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2082 }
2083
2084 /// Return the end of the list of BundleOpInfo instances associated
2085 /// with this OperandBundleUser.
2086 const_bundle_op_iterator bundle_op_info_end() const {
2087 auto *NonConstThis = const_cast<CallBase *>(this);
2088 return NonConstThis->bundle_op_info_end();
2089 }
2090
2091 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2092 iterator_range<bundle_op_iterator> bundle_op_infos() {
2093 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2094 }
2095
2096 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2097 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2098 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2099 }
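// A minimal iteration sketch (assumes a `CallBase *CB`): the public
// getOperandBundleAt accessor materializes an OperandBundleUse from each
// BundleOpInfo described above.
//
//   for (unsigned i = 0, e = CB->getNumOperandBundles(); i != e; ++i) {
//     OperandBundleUse OBU = CB->getOperandBundleAt(i);
//     errs() << OBU.getTagName() << ": " << OBU.Inputs.size() << " inputs\n";
//   }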
2100
2101 /// Populate the BundleOpInfo instances and the Use& vector from \p
2102 /// Bundles. Return the op_iterator pointing to the Use& one past the
2103 /// last bundle operand use.
2104 ///
2105 /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo
2106 /// instance allocated in this User's descriptor.
2107 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2108 const unsigned BeginIndex);
2109
2110 /// Return the BundleOpInfo for the operand at index OpIdx.
2111 ///
2112 /// It is an error to call this with an OpIdx that does not correspond to
2113 /// a bundle operand.
2114 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2115 for (auto &BOI : bundle_op_infos())
2116 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
2117 return BOI;
2118
2119 llvm_unreachable("Did not find operand bundle for operand!");
2120 }
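// Callers are therefore expected to check first whether the operand belongs
// to a bundle at all (a sketch, assuming a `CallBase *CB` and an index
// `OpIdx`):
//
//   if (CB->isBundleOperand(OpIdx))
//     ... // safe: OpIdx maps to exactly one BundleOpInfo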
2121
2122protected:
2123 /// Return the total number of values used in \p Bundles.
2124 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2125 unsigned Total = 0;
2126 for (auto &B : Bundles)
2127 Total += B.input_size();
2128 return Total;
2129 }
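// For example, for Bundles describing [ "deopt"(i32 1, i32 2),
// "funclet"(token %t) ], CountBundleInputs returns 3: two "deopt" inputs
// plus one "funclet" input.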
2130
2131 /// @}
2132 // End of operand bundle API.
2133
2134private:
2135 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2136 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2137
2138 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2139 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Kind))
2140 return true;
2141
2142 // Operand bundles override attributes on the called function, but don't
2143 // override attributes directly present on the call instruction.
2144 if (isFnAttrDisallowedByOpBundle(Kind))
2145 return false;
2146
2147 return hasFnAttrOnCalledFunction(Kind);
2148 }
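// The resolution order implemented above, sketched for
// hasFnAttr(Attribute::ReadNone):
//
//   1. the attribute sits directly on the call site -> true (bundles cannot
//      override it);
//   2. otherwise, an operand bundle disallows it    -> false;
//   3. otherwise, fall back to the called function's attributes.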
2149};
2150
2151template <>
2152struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2153
2154DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
2155
2156//===----------------------------------------------------------------------===//
2157// FuncletPadInst Class
2158//===----------------------------------------------------------------------===//
2159class FuncletPadInst : public Instruction {
2160private:
2161 FuncletPadInst(const FuncletPadInst &CPI);
2162
2163 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2164 ArrayRef<Value *> Args, unsigned Values,
2165 const Twine &NameStr, Instruction *InsertBefore);
2166 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2167 ArrayRef<Value *> Args, unsigned Values,
2168 const Twine &NameStr, BasicBlock *InsertAtEnd);
2169
2170 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2171
2172protected:
2173 // Note: Instruction needs to be a friend here to call cloneImpl.
2174 friend class Instruction;
2175 friend class CatchPadInst;
2176 friend class CleanupPadInst;
2177
2178 FuncletPadInst *cloneImpl() const;
2179
2180public:
2181 /// Provide fast operand accessors
2182 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2183
2184 /// getNumArgOperands - Return the number of funcletpad arguments.
2185 ///
2186 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2187
2188 /// Convenience accessors
2189
2190 /// Return the outer EH-pad this funclet is nested within.
2191 ///
2192 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2193 /// is a CatchPadInst.
2194 Value *getParentPad() const { return Op<-1>(); }
2195 void setParentPad(Value *ParentPad) {
2196 assert(ParentPad);
2197 Op<-1>() = ParentPad;
2198 }
2199
2200 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2201 ///
2202 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2203 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2204
2205 /// arg_operands - iteration adapter for range-for loops.
2206 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2207
2208 /// arg_operands - iteration adapter for range-for loops.
2209 const_op_range arg_operands() const {
2210 return const_op_range(op_begin(), op_end() - 1);
2211 }
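// A minimal iteration sketch (assumes a `FuncletPadInst *FPI`): visit the
// funclet arguments without touching the trailing parent-pad operand.
//
//   for (Value *Arg : FPI->arg_operands())
//     errs() << *Arg << "\n";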
2212
2213 // Methods for support type inquiry through isa, cast, and dyn_cast:
2214 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2215 static bool classof(const Value *V) {
2216 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2217 }
2218};
2219
2220template <>
2221struct OperandTraits<FuncletPadInst>
2222 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2223
2224DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
2225
2226} // end namespace llvm
2227
2228#endif // LLVM_IR_INSTRTYPES_H