Bug Summary

File: llvm/include/llvm/IR/IRBuilder.h
Warning: line 1700, column 28
Called C++ object pointer is null
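
This warning class fires when a member function is called through a pointer that the analyzer believes can be null on at least one execution path. As a minimal sketch of the general pattern the checker flags (the type and functions below are purely illustrative and are not taken from IRBuilder.h or OMPIRBuilder.cpp):

    struct Widget {                 // hypothetical type, used only to illustrate the diagnostic
      int size() const { return 42; }
    };

    Widget *find(bool ok) {
      static Widget W;
      return ok ? &W : nullptr;     // may return a null pointer
    }

    int use(bool ok) {
      Widget *P = find(ok);
      if (!P)                       // without this guard, the call P->size() on the
        return -1;                  // ok == false path is reported as a call on a null object
      return P->size();
    }

In the annotated path below, the analyzer traces a comparable path that starts in OpenMPIRBuilder::createStaticWorkshareLoop; the listing shown here ends before the final step inside IRBuilder.h is reached.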

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name OMPIRBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/build-llvm/lib/Frontend/OpenMP -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-01-24-223304-31662-1 -x c++ /build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
16
17#include "llvm/ADT/StringRef.h"
18#include "llvm/ADT/Triple.h"
19#include "llvm/IR/CFG.h"
20#include "llvm/IR/DebugInfo.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/MDBuilder.h"
23#include "llvm/Support/CommandLine.h"
24#include "llvm/Support/Error.h"
25#include "llvm/Transforms/Utils/BasicBlockUtils.h"
26#include "llvm/Transforms/Utils/CodeExtractor.h"
27
28#include <sstream>
29
30#define DEBUG_TYPE "openmp-ir-builder"
31
32using namespace llvm;
33using namespace omp;
34
35static cl::opt<bool>
36 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
37 cl::desc("Use optimistic attributes describing "
38 "'as-if' properties of runtime calls."),
39 cl::init(false));
40
41void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
42 LLVMContext &Ctx = Fn.getContext();
43
44#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
45#include "llvm/Frontend/OpenMP/OMPKinds.def"
46
47 // Add attributes to the new declaration.
48 switch (FnID) {
49#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
50 case Enum: \
51 Fn.setAttributes( \
52 AttributeList::get(Ctx, FnAttrSet, RetAttrSet, ArgAttrSets)); \
53 break;
54#include "llvm/Frontend/OpenMP/OMPKinds.def"
55 default:
56 // Attributes are optional.
57 break;
58 }
59}
60
61FunctionCallee
62OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
63 FunctionType *FnTy = nullptr;
64 Function *Fn = nullptr;
65
66 // Try to find the declaration in the module first.
67 switch (FnID) {
68#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
69 case Enum: \
70 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
71 IsVarArg); \
72 Fn = M.getFunction(Str); \
73 break;
74#include "llvm/Frontend/OpenMP/OMPKinds.def"
75 }
76
77 if (!Fn) {
78 // Create a new declaration if we need one.
79 switch (FnID) {
80#define OMP_RTL(Enum, Str, ...) \
81 case Enum: \
82 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
83 break;
84#include "llvm/Frontend/OpenMP/OMPKinds.def"
85 }
86
87 // Add information if the runtime function takes a callback function
88 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
89 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
90 LLVMContext &Ctx = Fn->getContext();
91 MDBuilder MDB(Ctx);
92 // Annotate the callback behavior of the runtime function:
93 // - The callback callee is argument number 2 (microtask).
94 // - The first two arguments of the callback callee are unknown (-1).
95 // - All variadic arguments to the runtime function are passed to the
96 // callback callee.
97 Fn->addMetadata(
98 LLVMContext::MD_callback,
99 *MDNode::get(Ctx, {MDB.createCallbackEncoding(
100 2, {-1, -1}, /* VarArgsArePassed */ true)}));
101 }
102 }
103
104 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
105 << " with type " << *Fn->getFunctionType() << "\n");
106 addAttributes(FnID, *Fn);
107
108 } else {
109 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
110 << " with type " << *Fn->getFunctionType() << "\n");
111 }
112
113 assert(Fn && "Failed to create OpenMP runtime function");
114
115 // Cast the function to the expected type if necessary
116 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
117 return {FnTy, C};
118}
119
120Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
121 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
122 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
123 assert(Fn && "Failed to create OpenMP runtime function pointer");
124 return Fn;
125}
126
127void OpenMPIRBuilder::initialize() { initializeTypes(M); }
128
129void OpenMPIRBuilder::finalize(bool AllowExtractorSinking) {
130 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
131 SmallVector<BasicBlock *, 32> Blocks;
132 for (OutlineInfo &OI : OutlineInfos) {
133 ParallelRegionBlockSet.clear();
134 Blocks.clear();
135 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
136
137 Function *OuterFn = OI.EntryBB->getParent();
138 CodeExtractorAnalysisCache CEAC(*OuterFn);
139 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
140 /* AggregateArgs */ false,
141 /* BlockFrequencyInfo */ nullptr,
142 /* BranchProbabilityInfo */ nullptr,
143 /* AssumptionCache */ nullptr,
144 /* AllowVarArgs */ true,
145 /* AllowAlloca */ true,
146 /* Suffix */ ".omp_par");
147
148 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
149 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
150 << " Exit: " << OI.ExitBB->getName() << "\n");
151 assert(Extractor.isEligible() &&
152 "Expected OpenMP outlining to be possible!");
153
154 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
155
156 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
157 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
158 assert(OutlinedFn->getReturnType()->isVoidTy() &&
159 "OpenMP outlined functions should not return a value!");
160
161 // For compatibility with the clang CG we move the outlined function after the
162 // one with the parallel region.
163 OutlinedFn->removeFromParent();
164 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
165
166 // Remove the artificial entry introduced by the extractor right away, we
167 // made our own entry block after all.
168 {
169 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
170 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
171 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
172 if (AllowExtractorSinking) {
173 // Move instructions from the to-be-deleted ArtificialEntry to the entry
174 // basic block of the parallel region. CodeExtractor may have sunk
175 // allocas/bitcasts for values that are solely used in the outlined
176 // region and do not escape.
177 assert(!ArtificialEntry.empty() &&
178 "Expected instructions to sink in the outlined region");
179 for (BasicBlock::iterator It = ArtificialEntry.begin(),
180 End = ArtificialEntry.end();
181 It != End;) {
182 Instruction &I = *It;
183 It++;
184
185 if (I.isTerminator())
186 continue;
187
188 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
189 }
190 }
191 OI.EntryBB->moveBefore(&ArtificialEntry);
192 ArtificialEntry.eraseFromParent();
193 }
194 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
195 assert(OutlinedFn && OutlinedFn->getNumUses() == 1);
196
197 // Run a user callback, e.g. to add attributes.
198 if (OI.PostOutlineCB)
199 OI.PostOutlineCB(*OutlinedFn);
200 }
201
202 // Allow finalize to be called multiple times.
203 OutlineInfos.clear();
204}
205
206Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
207 IdentFlag LocFlags,
208 unsigned Reserve2Flags) {
209 // Enable "C-mode".
210 LocFlags |= OMP_IDENT_FLAG_KMPC;
211
212 Value *&Ident =
213 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
214 if (!Ident) {
215 Constant *I32Null = ConstantInt::getNullValue(Int32);
216 Constant *IdentData[] = {
217 I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
218 ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
219 Constant *Initializer = ConstantStruct::get(
220 cast<StructType>(IdentPtr->getPointerElementType()), IdentData);
221
222 // Look for existing encoding of the location + flags, not needed but
223 // minimizes the difference to the existing solution while we transition.
224 for (GlobalVariable &GV : M.getGlobalList())
225 if (GV.getType() == IdentPtr && GV.hasInitializer())
226 if (GV.getInitializer() == Initializer)
227 return Ident = &GV;
228
229 auto *GV = new GlobalVariable(M, IdentPtr->getPointerElementType(),
230 /* isConstant = */ true,
231 GlobalValue::PrivateLinkage, Initializer);
232 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
233 GV->setAlignment(Align(8));
234 Ident = GV;
235 }
236 return Builder.CreatePointerCast(Ident, IdentPtr);
237}
238
239Type *OpenMPIRBuilder::getLanemaskType() {
240 LLVMContext &Ctx = M.getContext();
241 Triple triple(M.getTargetTriple());
242
243 // This test is adequate until deviceRTL has finer grained lane widths
244 return triple.isAMDGCN() ? Type::getInt64Ty(Ctx) : Type::getInt32Ty(Ctx);
245}
246
247Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
248 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
249 if (!SrcLocStr) {
250 Constant *Initializer =
251 ConstantDataArray::getString(M.getContext(), LocStr);
252
253 // Look for existing encoding of the location, not needed but minimizes the
254 // difference to the existing solution while we transition.
255 for (GlobalVariable &GV : M.getGlobalList())
256 if (GV.isConstant() && GV.hasInitializer() &&
257 GV.getInitializer() == Initializer)
258 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
259
260 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
261 /* AddressSpace */ 0, &M);
262 }
263 return SrcLocStr;
264}
265
266Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
267 StringRef FileName,
268 unsigned Line,
269 unsigned Column) {
270 SmallString<128> Buffer;
271 Buffer.push_back(';');
272 Buffer.append(FileName);
273 Buffer.push_back(';');
274 Buffer.append(FunctionName);
275 Buffer.push_back(';');
276 Buffer.append(std::to_string(Line));
277 Buffer.push_back(';');
278 Buffer.append(std::to_string(Column));
279 Buffer.push_back(';');
280 Buffer.push_back(';');
281 return getOrCreateSrcLocStr(Buffer.str());
282}
283
284Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
285 return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
286}
287
288Constant *
289OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
290 DILocation *DIL = Loc.DL.get();
291 if (!DIL)
292 return getOrCreateDefaultSrcLocStr();
293 StringRef FileName = M.getName();
294 if (DIFile *DIF = DIL->getFile())
295 if (Optional<StringRef> Source = DIF->getSource())
296 FileName = *Source;
297 StringRef Function = DIL->getScope()->getSubprogram()->getName();
298 Function =
299 !Function.empty() ? Function : Loc.IP.getBlock()->getParent()->getName();
300 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
301 DIL->getColumn());
302}
303
304Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
305 return Builder.CreateCall(
306 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
307 "omp_global_thread_num");
308}
309
310OpenMPIRBuilder::InsertPointTy
311OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
312 bool ForceSimpleCall, bool CheckCancelFlag) {
313 if (!updateToLocation(Loc))
314 return Loc.IP;
315 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
316}
317
318OpenMPIRBuilder::InsertPointTy
319OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
320 bool ForceSimpleCall, bool CheckCancelFlag) {
321 // Build call __kmpc_cancel_barrier(loc, thread_id) or
322 // __kmpc_barrier(loc, thread_id);
323
324 IdentFlag BarrierLocFlags;
325 switch (Kind) {
326 case OMPD_for:
327 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
328 break;
329 case OMPD_sections:
330 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
331 break;
332 case OMPD_single:
333 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
334 break;
335 case OMPD_barrier:
336 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
337 break;
338 default:
339 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
340 break;
341 }
342
343 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
344 Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags),
345 getOrCreateThreadID(getOrCreateIdent(SrcLocStr))};
346
347 // If we are in a cancellable parallel region, barriers are cancellation
348 // points.
349 // TODO: Check why we would force simple calls or to ignore the cancel flag.
350 bool UseCancelBarrier =
351 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
352
353 Value *Result =
354 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
355 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
356 : OMPRTL___kmpc_barrier),
357 Args);
358
359 if (UseCancelBarrier && CheckCancelFlag)
360 emitCancelationCheckImpl(Result, OMPD_parallel);
361
362 return Builder.saveIP();
363}
364
365OpenMPIRBuilder::InsertPointTy
366OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
367 Value *IfCondition,
368 omp::Directive CanceledDirective) {
369 if (!updateToLocation(Loc))
370 return Loc.IP;
371
372 // LLVM utilities like blocks with terminators.
373 auto *UI = Builder.CreateUnreachable();
374
375 Instruction *ThenTI = UI, *ElseTI = nullptr;
376 if (IfCondition)
377 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
378 Builder.SetInsertPoint(ThenTI);
379
380 Value *CancelKind = nullptr;
381 switch (CanceledDirective) {
382#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
383 case DirectiveEnum: \
384 CancelKind = Builder.getInt32(Value); \
385 break;
386#include "llvm/Frontend/OpenMP/OMPKinds.def"
387 default:
388 llvm_unreachable("Unknown cancel kind!");
389 }
390
391 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
392 Value *Ident = getOrCreateIdent(SrcLocStr);
393 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
394 Value *Result = Builder.CreateCall(
395 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
396
397 // The actual cancel logic is shared with others, e.g., cancel_barriers.
398 emitCancelationCheckImpl(Result, CanceledDirective);
399
400 // Update the insertion point and remove the terminator we introduced.
401 Builder.SetInsertPoint(UI->getParent());
402 UI->eraseFromParent();
403
404 return Builder.saveIP();
405}
406
407void OpenMPIRBuilder::emitCancelationCheckImpl(
408 Value *CancelFlag, omp::Directive CanceledDirective) {
409 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
410 "Unexpected cancellation!");
411
412 // For a cancel barrier we create two new blocks.
413 BasicBlock *BB = Builder.GetInsertBlock();
414 BasicBlock *NonCancellationBlock;
415 if (Builder.GetInsertPoint() == BB->end()) {
416 // TODO: This branch will not be needed once we moved to the
417 // OpenMPIRBuilder codegen completely.
418 NonCancellationBlock = BasicBlock::Create(
419 BB->getContext(), BB->getName() + ".cont", BB->getParent());
420 } else {
421 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
422 BB->getTerminator()->eraseFromParent();
423 Builder.SetInsertPoint(BB);
424 }
425 BasicBlock *CancellationBlock = BasicBlock::Create(
426 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
427
428 // Jump to them based on the return value.
429 Value *Cmp = Builder.CreateIsNull(CancelFlag);
430 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
431 /* TODO weight */ nullptr, nullptr);
432
433 // From the cancellation block we finalize all variables and go to the
434 // post finalization block that is known to the FiniCB callback.
435 Builder.SetInsertPoint(CancellationBlock);
436 auto &FI = FinalizationStack.back();
437 FI.FiniCB(Builder.saveIP());
438
439 // The continuation block is where code generation continues.
440 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
441}
442
443IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
444 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
445 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
446 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
447 omp::ProcBindKind ProcBind, bool IsCancellable) {
448 if (!updateToLocation(Loc))
449 return Loc.IP;
450
451 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
452 Value *Ident = getOrCreateIdent(SrcLocStr);
453 Value *ThreadID = getOrCreateThreadID(Ident);
454
455 if (NumThreads) {
456 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
457 Value *Args[] = {
458 Ident, ThreadID,
459 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
460 Builder.CreateCall(
461 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
462 }
463
464 if (ProcBind != OMP_PROC_BIND_default) {
465 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
466 Value *Args[] = {
467 Ident, ThreadID,
468 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
469 Builder.CreateCall(
470 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
471 }
472
473 BasicBlock *InsertBB = Builder.GetInsertBlock();
474 Function *OuterFn = InsertBB->getParent();
475
476 // Save the outer alloca block because the insertion iterator may get
477 // invalidated and we still need this later.
478 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
479
480 // Vector to remember instructions we used only during the modeling but which
481 // we want to delete at the end.
482 SmallVector<Instruction *, 4> ToBeDeleted;
483
484 // Change the location to the outer alloca insertion point to create and
485 // initialize the allocas we pass into the parallel region.
486 Builder.restoreIP(OuterAllocaIP);
487 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
488 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");
489
490 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the
491 // program, otherwise we only need them for modeling purposes to get the
492 // associated arguments in the outlined function. In the former case,
493 // initialize the allocas properly, in the latter case, delete them later.
494 if (IfCondition) {
495 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
496 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
497 } else {
498 ToBeDeleted.push_back(TIDAddr);
499 ToBeDeleted.push_back(ZeroAddr);
500 }
501
502 // Create an artificial insertion point that will also ensure the blocks we
503 // are about to split are not degenerated.
504 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
505
506 Instruction *ThenTI = UI, *ElseTI = nullptr;
507 if (IfCondition)
508 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
509
510 BasicBlock *ThenBB = ThenTI->getParent();
511 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
512 BasicBlock *PRegBodyBB =
513 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
514 BasicBlock *PRegPreFiniBB =
515 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
516 BasicBlock *PRegExitBB =
517 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");
518
519 auto FiniCBWrapper = [&](InsertPointTy IP) {
520 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
521 // target to the region exit block.
522 if (IP.getBlock()->end() == IP.getPoint()) {
523 IRBuilder<>::InsertPointGuard IPG(Builder);
524 Builder.restoreIP(IP);
525 Instruction *I = Builder.CreateBr(PRegExitBB);
526 IP = InsertPointTy(I->getParent(), I->getIterator());
527 }
528 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
529 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
530 "Unexpected insertion point for finalization call!");
531 return FiniCB(IP);
532 };
533
534 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
535
536 // Generate the privatization allocas in the block that will become the entry
537 // of the outlined function.
538 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
539 InsertPointTy InnerAllocaIP = Builder.saveIP();
540
541 AllocaInst *PrivTIDAddr =
542 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
543 Instruction *PrivTID = Builder.CreateLoad(PrivTIDAddr, "tid");
544
545 // Add some fake uses for OpenMP provided arguments.
546 ToBeDeleted.push_back(Builder.CreateLoad(TIDAddr, "tid.addr.use"));
547 Instruction *ZeroAddrUse = Builder.CreateLoad(ZeroAddr, "zero.addr.use");
548 ToBeDeleted.push_back(ZeroAddrUse);
549
550 // ThenBB
551 // |
552 // V
553 // PRegionEntryBB <- Privatization allocas are placed here.
554 // |
555 // V
556 // PRegionBodyBB <- BodyGen is invoked here.
557 // |
558 // V
559 // PRegPreFiniBB <- The block we will start finalization from.
560 // |
561 // V
562 // PRegionExitBB <- A common exit to simplify block collection.
563 //
564
565 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");
566
567 // Let the caller create the body.
568 assert(BodyGenCB && "Expected body generation callback!");
569 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
570 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);
571
572 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");
573
574 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
575 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
576 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
577 llvm::LLVMContext &Ctx = F->getContext();
578 MDBuilder MDB(Ctx);
579 // Annotate the callback behavior of the __kmpc_fork_call:
580 // - The callback callee is argument number 2 (microtask).
581 // - The first two arguments of the callback callee are unknown (-1).
582 // - All variadic arguments to the __kmpc_fork_call are passed to the
583 // callback callee.
584 F->addMetadata(
585 llvm::LLVMContext::MD_callback,
586 *llvm::MDNode::get(
587 Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
588 /* VarArgsArePassed */ true)}));
589 }
590 }
591
592 OutlineInfo OI;
593 OI.PostOutlineCB = [=](Function &OutlinedFn) {
594 // Add some known attributes.
595 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
596 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
597 OutlinedFn.addFnAttr(Attribute::NoUnwind);
598 OutlinedFn.addFnAttr(Attribute::NoRecurse);
599
600 assert(OutlinedFn.arg_size() >= 2 &&
601 "Expected at least tid and bounded tid as arguments");
602 unsigned NumCapturedVars =
603 OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
604
605 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
606 CI->getParent()->setName("omp_parallel");
607 Builder.SetInsertPoint(CI);
608
609 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
610 Value *ForkCallArgs[] = {
611 Ident, Builder.getInt32(NumCapturedVars),
612 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};
613
614 SmallVector<Value *, 16> RealArgs;
615 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
616 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
617
618 Builder.CreateCall(RTLFn, RealArgs);
619
620 LLVM_DEBUG(dbgs() << "With fork_call placed: "
621 << *Builder.GetInsertBlock()->getParent() << "\n");
622
623 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());
624
625 // Initialize the local TID stack location with the argument value.
626 Builder.SetInsertPoint(PrivTID);
627 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
628 Builder.CreateStore(Builder.CreateLoad(OutlinedAI), PrivTIDAddr);
629
630 // If no "if" clause was present we do not need the call created during
631 // outlining, otherwise we reuse it in the serialized parallel region.
632 if (!ElseTI) {
633 CI->eraseFromParent();
634 } else {
635
636 // If an "if" clause was present we are now generating the serialized
637 // version into the "else" branch.
638 Builder.SetInsertPoint(ElseTI);
639
640 // Build calls __kmpc_serialized_parallel(&Ident, GTid);
641 Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
642 Builder.CreateCall(
643 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
644 SerializedParallelCallArgs);
645
646 // OutlinedFn(&GTid, &zero, CapturedStruct);
647 CI->removeFromParent();
648 Builder.Insert(CI);
649
650 // __kmpc_end_serialized_parallel(&Ident, GTid);
651 Value *EndArgs[] = {Ident, ThreadID};
652 Builder.CreateCall(
653 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
654 EndArgs);
655
656 LLVM_DEBUG(dbgs() << "With serialized parallel region: "
657 << *Builder.GetInsertBlock()->getParent() << "\n");
658 }
659
660 for (Instruction *I : ToBeDeleted)
661 I->eraseFromParent();
662 };
663
664 // Adjust the finalization stack, verify the adjustment, and call the
665 // finalize function a last time to finalize values between the pre-fini
666 // block and the exit block if we left the parallel "the normal way".
667 auto FiniInfo = FinalizationStack.pop_back_val();
668 (void)FiniInfo;
669 assert(FiniInfo.DK == OMPD_parallel &&
670 "Unexpected finalization stack state!");
671
672 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
673
674 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
675 FiniCB(PreFiniIP);
676
677 OI.EntryBB = PRegEntryBB;
678 OI.ExitBB = PRegExitBB;
679
680 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
681 SmallVector<BasicBlock *, 32> Blocks;
682 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
683
684 // Ensure a single exit node for the outlined region by creating one.
685 // We might have multiple incoming edges to the exit now due to finalizations,
686 // e.g., cancel calls that cause the control flow to leave the region.
687 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
688 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
689 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
690 Blocks.push_back(PRegOutlinedExitBB);
691
692 CodeExtractorAnalysisCache CEAC(*OuterFn);
693 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
694 /* AggregateArgs */ false,
695 /* BlockFrequencyInfo */ nullptr,
696 /* BranchProbabilityInfo */ nullptr,
697 /* AssumptionCache */ nullptr,
698 /* AllowVarArgs */ true,
699 /* AllowAlloca */ true,
700 /* Suffix */ ".omp_par");
701
702 // Find inputs to, outputs from the code region.
703 BasicBlock *CommonExit = nullptr;
704 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
705 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
706 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
707
708 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");
709
710 FunctionCallee TIDRTLFn =
711 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
712
713 auto PrivHelper = [&](Value &V) {
714 if (&V == TIDAddr || &V == ZeroAddr)
715 return;
716
717 SetVector<Use *> Uses;
718 for (Use &U : V.uses())
719 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
720 if (ParallelRegionBlockSet.count(UserI->getParent()))
721 Uses.insert(&U);
722
723 // __kmpc_fork_call expects extra arguments as pointers. If the input
724 // already has a pointer type, everything is fine. Otherwise, store the
725 // value onto stack and load it back inside the to-be-outlined region. This
726 // will ensure only the pointer will be passed to the function.
727 // FIXME: if there are more than 15 trailing arguments, they must be
728 // additionally packed in a struct.
729 Value *Inner = &V;
730 if (!V.getType()->isPointerTy()) {
731 IRBuilder<>::InsertPointGuard Guard(Builder);
732 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");
733
734 Builder.restoreIP(OuterAllocaIP);
735 Value *Ptr =
736 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
737
738 // Store to stack at end of the block that currently branches to the entry
739 // block of the to-be-outlined region.
740 Builder.SetInsertPoint(InsertBB,
741 InsertBB->getTerminator()->getIterator());
742 Builder.CreateStore(&V, Ptr);
743
744 // Load back next to allocations in the to-be-outlined region.
745 Builder.restoreIP(InnerAllocaIP);
746 Inner = Builder.CreateLoad(Ptr);
747 }
748
749 Value *ReplacementValue = nullptr;
750 CallInst *CI = dyn_cast<CallInst>(&V);
751 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
752 ReplacementValue = PrivTID;
753 } else {
754 Builder.restoreIP(
755 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
756 assert(ReplacementValue &&
757 "Expected copy/create callback to set replacement value!");
758 if (ReplacementValue == &V)
759 return;
760 }
761
762 for (Use *UPtr : Uses)
763 UPtr->set(ReplacementValue);
764 };
765
766 // Reset the inner alloca insertion as it will be used for loading the values
767 // wrapped into pointers before passing them into the to-be-outlined region.
768 // Configure it to insert immediately after the fake use of zero address so
769 // that they are available in the generated body and so that the
770 // OpenMP-related values (thread ID and zero address pointers) remain leading
771 // in the argument list.
772 InnerAllocaIP = IRBuilder<>::InsertPoint(
773 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
774
775 // Reset the outer alloca insertion point to the entry of the relevant block
776 // in case it was invalidated.
777 OuterAllocaIP = IRBuilder<>::InsertPoint(
778 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
779
780 for (Value *Input : Inputs) {
781 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
782 PrivHelper(*Input);
783 }
784 LLVM_DEBUG({
785 for (Value *Output : Outputs)
786 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
787 });
788 assert(Outputs.empty() &&
789 "OpenMP outlining should not produce live-out values!");
790
791 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
792 LLVM_DEBUG({
793 for (auto *BB : Blocks)
794 dbgs() << " PBR: " << BB->getName() << "\n";
795 });
796
797 // Register the outlined info.
798 addOutlineInfo(std::move(OI));
799
800 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
801 UI->eraseFromParent();
802
803 return AfterIP;
804}
805
806void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
807 // Build call void __kmpc_flush(ident_t *loc)
808 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
809 Value *Args[] = {getOrCreateIdent(SrcLocStr)};
810
811 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
812}
813
814void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
815 if (!updateToLocation(Loc))
816 return;
817 emitFlush(Loc);
818}
819
820void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
821 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
822 // global_tid);
823 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
824 Value *Ident = getOrCreateIdent(SrcLocStr);
825 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
826
827 // Ignore return result until untied tasks are supported.
828 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
829 Args);
830}
831
832void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
833 if (!updateToLocation(Loc))
834 return;
835 emitTaskwaitImpl(Loc);
836}
837
838void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
839 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
840 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
841 Value *Ident = getOrCreateIdent(SrcLocStr);
842 Constant *I32Null = ConstantInt::getNullValue(Int32);
843 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
844
845 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
846 Args);
847}
848
849void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
850 if (!updateToLocation(Loc))
851 return;
852 emitTaskyieldImpl(Loc);
853}
854
855OpenMPIRBuilder::InsertPointTy
856OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
857 BodyGenCallbackTy BodyGenCB,
858 FinalizeCallbackTy FiniCB) {
859
860 if (!updateToLocation(Loc))
861 return Loc.IP;
862
863 Directive OMPD = Directive::OMPD_master;
864 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
865 Value *Ident = getOrCreateIdent(SrcLocStr);
866 Value *ThreadId = getOrCreateThreadID(Ident);
867 Value *Args[] = {Ident, ThreadId};
868
869 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
870 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
871
872 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
873 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
874
875 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
876 /*Conditional*/ true, /*hasFinalize*/ true);
877}
878
879CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
880 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
881 BasicBlock *PostInsertBefore, const Twine &Name) {
882 Module *M = F->getParent();
883 LLVMContext &Ctx = M->getContext();
884 Type *IndVarTy = TripCount->getType();
885
886 // Create the basic block structure.
887 BasicBlock *Preheader =
888 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
889 BasicBlock *Header =
890 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
891 BasicBlock *Cond =
892 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
893 BasicBlock *Body =
894 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
895 BasicBlock *Latch =
896 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
897 BasicBlock *Exit =
898 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
899 BasicBlock *After =
900 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
901
902 // Use specified DebugLoc for new instructions.
903 Builder.SetCurrentDebugLocation(DL);
904
905 Builder.SetInsertPoint(Preheader);
906 Builder.CreateBr(Header);
907
908 Builder.SetInsertPoint(Header);
909 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
910 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
911 Builder.CreateBr(Cond);
912
913 Builder.SetInsertPoint(Cond);
914 Value *Cmp =
915 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
916 Builder.CreateCondBr(Cmp, Body, Exit);
917
918 Builder.SetInsertPoint(Body);
919 Builder.CreateBr(Latch);
920
921 Builder.SetInsertPoint(Latch);
922 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
923 "omp_" + Name + ".next", /*HasNUW=*/true);
924 Builder.CreateBr(Header);
925 IndVarPHI->addIncoming(Next, Latch);
926
927 Builder.SetInsertPoint(Exit);
928 Builder.CreateBr(After);
929
930 // Remember and return the canonical control flow.
931 LoopInfos.emplace_front();
932 CanonicalLoopInfo *CL = &LoopInfos.front();
933
934 CL->Preheader = Preheader;
935 CL->Header = Header;
936 CL->Cond = Cond;
937 CL->Body = Body;
938 CL->Latch = Latch;
939 CL->Exit = Exit;
940 CL->After = After;
941
942 CL->IsValid = true;
943
944#ifndef NDEBUG
945 CL->assertOK();
946#endif
947 return CL;
948}
949
950CanonicalLoopInfo *
951OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
952 LoopBodyGenCallbackTy BodyGenCB,
953 Value *TripCount, const Twine &Name) {
954 BasicBlock *BB = Loc.IP.getBlock();
955 BasicBlock *NextBB = BB->getNextNode();
956
957 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
958 NextBB, NextBB, Name);
959 BasicBlock *After = CL->getAfter();
960
961 // If location is not set, don't connect the loop.
962 if (updateToLocation(Loc)) {
963 // Split the loop at the insertion point: Branch to the preheader and move
964 // every following instruction to after the loop (the After BB). Also, the
965 // new successor is the loop's after block.
966 Builder.CreateBr(CL->Preheader);
967 After->getInstList().splice(After->begin(), BB->getInstList(),
968 Builder.GetInsertPoint(), BB->end());
969 After->replaceSuccessorsPhiUsesWith(BB, After);
970 }
971
972 // Emit the body content. We do it after connecting the loop to the CFG to
973 // avoid that the callback encounters degenerate BBs.
974 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
975
976#ifndef NDEBUG
977 CL->assertOK();
978#endif
979 return CL;
980}
981
982CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
983 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
984 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
985 InsertPointTy ComputeIP, const Twine &Name) {
986
987 // Consider the following difficulties (assuming 8-bit signed integers):
988 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
989 // DO I = 1, 100, 50
990 // * A \p Step of INT_MIN cannot be normalized to a positive direction:
991 // DO I = 100, 0, -128
992
993 // Start, Stop and Step must be of the same integer type.
994 auto *IndVarTy = cast<IntegerType>(Start->getType());
995 assert(IndVarTy == Stop->getType() && "Stop type mismatch");
996 assert(IndVarTy == Step->getType() && "Step type mismatch");
997
998 LocationDescription ComputeLoc =
999 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
1000 updateToLocation(ComputeLoc);
1001
1002 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
1003 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
1004
1005 // Like Step, but always positive.
1006 Value *Incr = Step;
1007
1008 // Distance between Start and Stop; always positive.
1009 Value *Span;
1010
1011 // Condition whether no iterations are executed at all, e.g. because
1012 // UB < LB.
1013 Value *ZeroCmp;
1014
1015 if (IsSigned) {
1016 // Ensure that increment is positive. If not, negate and invert LB and UB.
1017 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
1018 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
1019 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
1020 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
1021 Span = Builder.CreateSub(UB, LB, "", false, true);
1022 ZeroCmp = Builder.CreateICmp(
1023 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
1024 } else {
1025 Span = Builder.CreateSub(Stop, Start, "", true);
1026 ZeroCmp = Builder.CreateICmp(
1027 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
1028 }
1029
1030 Value *CountIfLooping;
1031 if (InclusiveStop) {
1032 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
1033 } else {
1034 // Avoid incrementing past stop since it could overflow.
1035 Value *CountIfTwo = Builder.CreateAdd(
1036 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
1037 Value *OneCmp = Builder.CreateICmp(
1038 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
1039 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
1040 }
1041 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
1042 "omp_" + Name + ".tripcount");
1043
1044 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
1045 Builder.restoreIP(CodeGenIP);
1046 Value *Span = Builder.CreateMul(IV, Step);
1047 Value *IndVar = Builder.CreateAdd(Span, Start);
1048 BodyGenCB(Builder.saveIP(), IndVar);
1049 };
1050 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
1051 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
1052}
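A minimal sketch of the trip-count arithmetic above, assuming plain unsigned 64-bit integers; the helper name is invented and only the signed path is mirrored. It reproduces the Incr/Span/ZeroCmp/CountIfLooping computation so it can be checked against counting iterations by hand:

  #include <cassert>
  #include <cstdint>

  // Hypothetical helper mirroring the IR emitted above for signed bounds:
  // how many iterations "for (i = Start; i < Stop (or <=); i += Step)" runs.
  static uint64_t tripCountSigned(int64_t Start, int64_t Stop, int64_t Step,
                                  bool InclusiveStop) {
    bool IsNeg = Step < 0;
    uint64_t Incr = IsNeg ? 0 - (uint64_t)Step : (uint64_t)Step; // always positive
    int64_t LB = IsNeg ? Stop : Start;
    int64_t UB = IsNeg ? Start : Stop;
    uint64_t Span = (uint64_t)UB - (uint64_t)LB;                 // always positive
    bool ZeroTrip = InclusiveStop ? (UB < LB) : (UB <= LB);
    uint64_t CountIfLooping;
    if (InclusiveStop)
      CountIfLooping = Span / Incr + 1;
    else
      CountIfLooping = (Span <= Incr) ? 1 : (Span - 1) / Incr + 1;
    return ZeroTrip ? 0 : CountIfLooping;
  }

  int main() {
    assert(tripCountSigned(1, 100, 50, /*InclusiveStop=*/true) == 2);   // I = 1, 51
    assert(tripCountSigned(100, 0, -128, /*InclusiveStop=*/true) == 1); // I = 100
    assert(tripCountSigned(0, 10, 3, /*InclusiveStop=*/false) == 4);    // 0, 3, 6, 9
    return 0;
  }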
1053
1054// Returns an LLVM function to call for initializing loop bounds using OpenMP
1055// static scheduling depending on `type`. Only i32 and i64 are supported by the
1056// runtime. Always interpret integers as unsigned similarly to
1057// CanonicalLoopInfo.
1058static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
1059 OpenMPIRBuilder &OMPBuilder) {
1060 unsigned Bitwidth = Ty->getIntegerBitWidth();
1061 if (Bitwidth == 32)
1062 return OMPBuilder.getOrCreateRuntimeFunction(
1063 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
1064 if (Bitwidth == 64)
1065 return OMPBuilder.getOrCreateRuntimeFunction(
1066 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
1067 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1068}
1069
1070// Sets the number of loop iterations to the given value. This value must be
1071// valid in the condition block (i.e., defined in the preheader) and is
1072// interpreted as an unsigned integer.
1073void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) {
1074 Instruction *CmpI = &CLI->getCond()->front();
1075 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
1076 CmpI->setOperand(1, TripCount);
1077 CLI->assertOK();
1078}
1079
1080CanonicalLoopInfo *OpenMPIRBuilder::createStaticWorkshareLoop(
1081 const LocationDescription &Loc, CanonicalLoopInfo *CLI,
1082 InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk) {
1083 // Set up the source location value for OpenMP runtime.
1084 if (!updateToLocation(Loc))
1
Taking false branch
1085 return nullptr;
1086
1087 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1088 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1089
1090 // Declare useful OpenMP runtime functions.
1091 Value *IV = CLI->getIndVar();
1092 Type *IVTy = IV->getType();
1093 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
1094 FunctionCallee StaticFini =
1095 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
1096
1097 // Allocate space for computed loop bounds as expected by the "init" function.
1098 Builder.restoreIP(AllocaIP);
2
Calling 'IRBuilderBase::restoreIP'
7
Returning from 'IRBuilderBase::restoreIP'
1099 Type *I32Type = Type::getInt32Ty(M.getContext());
1100 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
8
Calling 'IRBuilderBase::CreateAlloca'
1101 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1102 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1103 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1104
1105 // At the end of the preheader, prepare for calling the "init" function by
1106 // storing the current loop bounds into the allocated space. A canonical loop
1107 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1108 // and produces an inclusive upper bound.
1109 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
1110 Constant *Zero = ConstantInt::get(IVTy, 0);
1111 Constant *One = ConstantInt::get(IVTy, 1);
1112 Builder.CreateStore(Zero, PLowerBound);
1113 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
1114 Builder.CreateStore(UpperBound, PUpperBound);
1115 Builder.CreateStore(One, PStride);
1116
1117 if (!Chunk)
1118 Chunk = One;
1119
1120 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1121
1122 // TODO: extract scheduling type and map it to OMP constant. This is currently
1123 // happening in kmp.h and its ilk and needs to be moved to OpenMP.td first.
1124 constexpr int StaticSchedType = 34;
1125 Constant *SchedulingType = ConstantInt::get(I32Type, StaticSchedType);
1126
1127 // Call the "init" function and update the trip count of the loop with the
1128 // value it produced.
1129 Builder.CreateCall(StaticInit,
1130 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
1131 PUpperBound, PStride, One, Chunk});
1132 Value *LowerBound = Builder.CreateLoad(PLowerBound);
1133 Value *InclusiveUpperBound = Builder.CreateLoad(PUpperBound);
1134 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
1135 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
1136 setCanonicalLoopTripCount(CLI, TripCount);
1137
1138 // Update all uses of the induction variable except the one in the condition
1139 // block that compares it with the actual upper bound, and the increment in
1140 // the latch block.
1141 // TODO: this can eventually move to CanonicalLoopInfo or to a new
1142 // CanonicalLoopInfoUpdater interface.
1143 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt());
1144 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound);
1145 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) {
1146 auto *Instr = dyn_cast<Instruction>(U.getUser());
1147 return !Instr ||
1148 (Instr->getParent() != CLI->getCond() &&
1149 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV);
1150 });
1151
1152 // In the "exit" block, call the "fini" function.
1153 Builder.SetInsertPoint(CLI->getExit(),
1154 CLI->getExit()->getTerminator()->getIterator());
1155 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
1156
1157 // Add the barrier if requested.
1158 if (NeedsBarrier)
1159 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
1160 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1161 /* CheckCancelFlag */ false);
1162
1163 CLI->assertOK();
1164 return CLI;
1165}
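A minimal sketch of why the code above reloads *PLowerBound and *PUpperBound and rebuilds the trip count as upper - lower + 1: the "init" call hands each thread an inclusive sub-range of the canonical 0..TripCount-1 space. The partition below is only one plausible static split (the real one is whatever __kmpc_for_static_init_* computes); the thread count, names, and helper are assumptions for illustration:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t TripCount = 10;
    const unsigned NumThreads = 3;
    for (unsigned T = 0; T < NumThreads; ++T) {
      uint64_t Chunk = TripCount / NumThreads, Rem = TripCount % NumThreads;
      uint64_t Lower = T * Chunk + std::min<uint64_t>(T, Rem);
      uint64_t Size = Chunk + (T < Rem ? 1 : 0);
      if (Size == 0)
        continue;                                // no iterations for this thread
      uint64_t Upper = Lower + Size - 1;         // inclusive, as "init" expects
      uint64_t LocalTripCount = Upper - Lower + 1; // what setCanonicalLoopTripCount gets
      std::printf("thread %u: [%llu, %llu], %llu iterations\n", T,
                  (unsigned long long)Lower, (unsigned long long)Upper,
                  (unsigned long long)LocalTripCount);
    }
    return 0;
  }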
1166
1167/// Make \p Source branch to \p Target.
1168///
1169/// Handles two situations:
1170/// * \p Source already has an unconditional branch.
1171/// * \p Source is a degenerate block (no terminator because the BB is
1172/// the current head of the IR construction).
1173static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
1174 if (Instruction *Term = Source->getTerminator()) {
1175 auto *Br = cast<BranchInst>(Term);
1176 assert(!Br->isConditional() &&
1177 "BB's terminator must be an unconditional branch (or degenerate)");
1178 BasicBlock *Succ = Br->getSuccessor(0);
1179 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
1180 Br->setSuccessor(0, Target);
1181 return;
1182 }
1183
1184 auto *NewBr = BranchInst::Create(Target, Source);
1185 NewBr->setDebugLoc(DL);
1186}
1187
1188/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
1189/// after this \p OldTarget will be orphaned.
1190static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
1191 BasicBlock *NewTarget, DebugLoc DL) {
1192 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
1193 redirectTo(Pred, NewTarget, DL);
1194}
1195
1196 /// Determine which blocks in \p BBs are reachable from outside and remove from
1197 /// the function the ones that are not.
1198static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
1199 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
1200 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
1201 for (Use &U : BB->uses()) {
1202 auto *UseInst = dyn_cast<Instruction>(U.getUser());
1203 if (!UseInst)
1204 continue;
1205 if (BBsToErase.count(UseInst->getParent()))
1206 continue;
1207 return true;
1208 }
1209 return false;
1210 };
1211
1212 while (true) {
1213 bool Changed = false;
1214 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
1215 if (HasRemainingUses(BB)) {
1216 BBsToErase.erase(BB);
1217 Changed = true;
1218 }
1219 }
1220 if (!Changed)
1221 break;
1222 }
1223
1224 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
1225 DeleteDeadBlocks(BBVec);
1226}
1227
1228std::vector<CanonicalLoopInfo *>
1229OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1230 ArrayRef<Value *> TileSizes) {
1231 assert(TileSizes.size() == Loops.size() &&
1232 "Must pass as many tile sizes as there are loops");
1233 int NumLoops = Loops.size();
1234 assert(NumLoops >= 1 && "At least one loop to tile required");
1235
1236 CanonicalLoopInfo *OutermostLoop = Loops.front();
1237 CanonicalLoopInfo *InnermostLoop = Loops.back();
1238 Function *F = OutermostLoop->getBody()->getParent();
1239 BasicBlock *InnerEnter = InnermostLoop->getBody();
1240 BasicBlock *InnerLatch = InnermostLoop->getLatch();
1241
1242 // Collect the original trip counts and induction variables to be accessible by
1243 // index. Also, the structure of the original loops is not preserved during
1244 // the construction of the tiled loops, so do it before we scavenge the BBs of
1245 // any original CanonicalLoopInfo.
1246 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
1247 for (CanonicalLoopInfo *L : Loops) {
1248 OrigTripCounts.push_back(L->getTripCount());
1249 OrigIndVars.push_back(L->getIndVar());
1250 }
1251
1252 // Collect the code between loop headers. These may contain SSA definitions
1253 // that are used in the loop nest body. To be usable within the innermost
1254 // body, these BasicBlocks will be sunk into the loop nest body. That is,
1255 // these instructions may be executed more often than before the tiling.
1256 // TODO: It would be sufficient to only sink them into body of the
1257 // corresponding tile loop.
1258 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
1259 for (int i = 0; i < NumLoops - 1; ++i) {
1260 CanonicalLoopInfo *Surrounding = Loops[i];
1261 CanonicalLoopInfo *Nested = Loops[i + 1];
1262
1263 BasicBlock *EnterBB = Surrounding->getBody();
1264 BasicBlock *ExitBB = Nested->getHeader();
1265 InbetweenCode.emplace_back(EnterBB, ExitBB);
1266 }
1267
1268 // Compute the trip counts of the floor loops.
1269 Builder.SetCurrentDebugLocation(DL);
1270 Builder.restoreIP(OutermostLoop->getPreheaderIP());
1271 SmallVector<Value *, 4> FloorCount, FloorRems;
1272 for (int i = 0; i < NumLoops; ++i) {
1273 Value *TileSize = TileSizes[i];
1274 Value *OrigTripCount = OrigTripCounts[i];
1275 Type *IVType = OrigTripCount->getType();
1276
1277 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
1278 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
1279
1280 // 0 if the tilesize divides the tripcount, 1 otherwise.
1281 // 1 means we need an additional iteration for a partial tile.
1282 //
1283 // Unfortunately we cannot just use the roundup-formula
1284 // (tripcount + tilesize - 1)/tilesize
1285 // because the summation might overflow. We do not want to introduce undefined
1286 // behavior when the untiled loop nest did not.
1287 Value *FloorTripOverflow =
1288 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
1289
1290 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
1291 FloorTripCount =
1292 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
1293 "omp_floor" + Twine(i) + ".tripcount", true);
1294
1295 // Remember some values for later use.
1296 FloorCount.push_back(FloorTripCount);
1297 FloorRems.push_back(FloorTripRem);
1298 }
1299
1300 // Generate the new loop nest, from the outermost to the innermost.
1301 std::vector<CanonicalLoopInfo *> Result;
1302 Result.reserve(NumLoops * 2);
1303
1304 // The basic block of the surrounding loop that enters the next generated
1305 // loop.
1306 BasicBlock *Enter = OutermostLoop->getPreheader();
1307
1308 // The basic block of the surrounding loop where the inner code should
1309 // continue.
1310 BasicBlock *Continue = OutermostLoop->getAfter();
1311
1312 // Where the next loop basic block should be inserted.
1313 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
1314
1315 auto EmbeddNewLoop =
1316 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
1317 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
1318 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
1319 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
1320 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
1321 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
1322
1323 // Setup the position where the next embedded loop connects to this loop.
1324 Enter = EmbeddedLoop->getBody();
1325 Continue = EmbeddedLoop->getLatch();
1326 OutroInsertBefore = EmbeddedLoop->getLatch();
1327 return EmbeddedLoop;
1328 };
1329
1330 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
1331 const Twine &NameBase) {
1332 for (auto P : enumerate(TripCounts)) {
1333 CanonicalLoopInfo *EmbeddedLoop =
1334 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
1335 Result.push_back(EmbeddedLoop);
1336 }
1337 };
1338
1339 EmbeddNewLoops(FloorCount, "floor");
1340
1341 // Within the body of the innermost floor loop, emit the code that computes
1342 // the trip counts of the tile loops.
1343 Builder.SetInsertPoint(Enter->getTerminator());
1344 SmallVector<Value *, 4> TileCounts;
1345 for (int i = 0; i < NumLoops; ++i) {
1346 CanonicalLoopInfo *FloorLoop = Result[i];
1347 Value *TileSize = TileSizes[i];
1348
1349 Value *FloorIsEpilogue =
1350 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
1351 Value *TileTripCount =
1352 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
1353
1354 TileCounts.push_back(TileTripCount);
1355 }
1356
1357 // Create the tile loops.
1358 EmbeddNewLoops(TileCounts, "tile");
1359
1360 // Insert the inbetween code into the body.
1361 BasicBlock *BodyEnter = Enter;
1362 BasicBlock *BodyEntered = nullptr;
1363 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
1364 BasicBlock *EnterBB = P.first;
1365 BasicBlock *ExitBB = P.second;
1366
1367 if (BodyEnter)
1368 redirectTo(BodyEnter, EnterBB, DL);
1369 else
1370 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
1371
1372 BodyEnter = nullptr;
1373 BodyEntered = ExitBB;
1374 }
1375
1376 // Append the original loop nest body into the generated loop nest body.
1377 if (BodyEnter)
1378 redirectTo(BodyEnter, InnerEnter, DL);
1379 else
1380 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
1381 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
1382
1383 // Replace the original induction variable with an induction variable computed
1384 // from the tile and floor induction variables.
1385 Builder.restoreIP(Result.back()->getBodyIP());
1386 for (int i = 0; i < NumLoops; ++i) {
1387 CanonicalLoopInfo *FloorLoop = Result[i];
1388 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
1389 Value *OrigIndVar = OrigIndVars[i];
1390 Value *Size = TileSizes[i];
1391
1392 Value *Scale =
1393 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
1394 Value *Shift =
1395 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
1396 OrigIndVar->replaceAllUsesWith(Shift);
1397 }
1398
1399 // Remove unused parts of the original loops.
1400 SmallVector<BasicBlock *, 12> OldControlBBs;
1401 OldControlBBs.reserve(6 * Loops.size());
1402 for (CanonicalLoopInfo *Loop : Loops)
1403 Loop->collectControlBlocks(OldControlBBs);
1404 removeUnusedBlocksFromParent(OldControlBBs);
1405
1406#ifndef NDEBUG
1407 for (CanonicalLoopInfo *GenL : Result)
1408 GenL->assertOK();
1409#endif
1410 return Result;
1411}
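A minimal sketch of the floor-loop arithmetic used above, assuming plain unsigned 64-bit integers; the helper name is invented. It shows the remainder-based round-up that stands in for the overflow-prone (N + S - 1) / S formula mentioned in the comment:

  #include <cassert>
  #include <cstdint>

  // A loop of N iterations tiled with tile size S becomes a floor loop of
  // ceil(N/S) iterations, where the last tile is partial iff N % S != 0.
  static uint64_t floorTripCount(uint64_t N, uint64_t S) {
    uint64_t FloorTripCount = N / S;
    uint64_t FloorTripRem = N % S;
    uint64_t Overflow = (FloorTripRem != 0) ? 1 : 0; // extra partial tile
    return FloorTripCount + Overflow;
  }

  int main() {
    assert(floorTripCount(10, 4) == 3); // tiles of 4, 4, 2 iterations
    assert(floorTripCount(8, 4) == 2);  // divides evenly: tiles of 4, 4
    assert(floorTripCount(3, 4) == 1);  // single partial tile of 3
    return 0;
  }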
1412
1413OpenMPIRBuilder::InsertPointTy
1414OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
1415 llvm::Value *BufSize, llvm::Value *CpyBuf,
1416 llvm::Value *CpyFn, llvm::Value *DidIt) {
1417 if (!updateToLocation(Loc))
1418 return Loc.IP;
1419
1420 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1421 Value *Ident = getOrCreateIdent(SrcLocStr);
1422 Value *ThreadId = getOrCreateThreadID(Ident);
1423
1424 llvm::Value *DidItLD = Builder.CreateLoad(DidIt);
1425
1426 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
1427
1428 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
1429 Builder.CreateCall(Fn, Args);
1430
1431 return Builder.saveIP();
1432}
1433
1434OpenMPIRBuilder::InsertPointTy
1435OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
1436 BodyGenCallbackTy BodyGenCB,
1437 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) {
1438
1439 if (!updateToLocation(Loc))
1440 return Loc.IP;
1441
1442 // If needed (i.e. not null), initialize `DidIt` with 0
1443 if (DidIt) {
1444 Builder.CreateStore(Builder.getInt32(0), DidIt);
1445 }
1446
1447 Directive OMPD = Directive::OMPD_single;
1448 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1449 Value *Ident = getOrCreateIdent(SrcLocStr);
1450 Value *ThreadId = getOrCreateThreadID(Ident);
1451 Value *Args[] = {Ident, ThreadId};
1452
1453 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
1454 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1455
1456 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
1457 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1458
1459 // generates the following:
1460 // if (__kmpc_single()) {
1461 // .... single region ...
1462 // __kmpc_end_single
1463 // }
1464
1465 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1466 /*Conditional*/ true, /*hasFinalize*/ true);
1467}
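A minimal, untested driver sketch of how createSingle might be invoked, assuming the callback shapes used by EmitOMPInlinedRegion below (BodyGenCB takes an alloca insertion point, a code-gen insertion point and the finalization block; FiniCB takes an insertion point). The module/function scaffolding and the "demo" names are assumptions for illustration, not part of this file:

  #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    Module M("single_demo", Ctx);
    IRBuilder<> Builder(Ctx);
    Function *F = Function::Create(FunctionType::get(Type::getVoidTy(Ctx), false),
                                   GlobalValue::ExternalLinkage, "demo", &M);
    BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
    Builder.SetInsertPoint(Entry);

    OpenMPIRBuilder OMPBuilder(M);
    OMPBuilder.initialize();
    OpenMPIRBuilder::LocationDescription Loc(Builder.saveIP(), DebugLoc());

    auto BodyGenCB = [&](OpenMPIRBuilder::InsertPointTy AllocaIP,
                         OpenMPIRBuilder::InsertPointTy CodeGenIP,
                         BasicBlock &FiniBB) {
      Builder.restoreIP(CodeGenIP);
      // ... emit the body of the 'single' region here ...
    };
    auto FiniCB = [&](OpenMPIRBuilder::InsertPointTy CodeGenIP) {
      // ... emit cleanup for the end of the region here, if needed ...
    };

    Builder.restoreIP(
        OMPBuilder.createSingle(Loc, BodyGenCB, FiniCB, /*DidIt=*/nullptr));
    Builder.CreateRetVoid();
    OMPBuilder.finalize();
    M.print(outs(), nullptr);
    return 0;
  }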
1468
1469OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
1470 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
1471 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
1472
1473 if (!updateToLocation(Loc))
1474 return Loc.IP;
1475
1476 Directive OMPD = Directive::OMPD_critical;
1477 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1478 Value *Ident = getOrCreateIdent(SrcLocStr);
1479 Value *ThreadId = getOrCreateThreadID(Ident);
1480 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
1481 Value *Args[] = {Ident, ThreadId, LockVar};
1482
1483 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
1484 Function *RTFn = nullptr;
1485 if (HintInst) {
1486 // Add Hint to entry Args and create call
1487 EnterArgs.push_back(HintInst);
1488 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
1489 } else {
1490 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
1491 }
1492 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
1493
1494 Function *ExitRTLFn =
1495 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
1496 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1497
1498 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1499 /*Conditional*/ false, /*hasFinalize*/ true);
1500}
1501
1502OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
1503 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
1504 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
1505 bool HasFinalize) {
1506
1507 if (HasFinalize)
1508 FinalizationStack.push_back({FiniCB, OMPD, /*IsCancellable*/ false});
1509
1510 // Create inlined region's entry and body blocks, in preparation
1511 // for conditional creation
1512 BasicBlock *EntryBB = Builder.GetInsertBlock();
1513 Instruction *SplitPos = EntryBB->getTerminator();
1514 if (!isa_and_nonnull<BranchInst>(SplitPos))
1515 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
1516 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
1517 BasicBlock *FiniBB =
1518 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
1519
1520 Builder.SetInsertPoint(EntryBB->getTerminator());
1521 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
1522
1523 // generate body
1524 BodyGenCB(/* AllocaIP */ InsertPointTy(),
1525 /* CodeGenIP */ Builder.saveIP(), *FiniBB);
1526
1527 // If we didn't emit a branch to FiniBB during body generation, it means
1528 // FiniBB is unreachable (e.g. while(1);). Stop generating all the
1529 // unreachable blocks, and remove anything we are not going to use.
1530 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
1531 if (SkipEmittingRegion) {
1532 FiniBB->eraseFromParent();
1533 ExitCall->eraseFromParent();
1534 // Discard finalization if we have it.
1535 if (HasFinalize) {
1536 assert(!FinalizationStack.empty() &&
1537 "Unexpected finalization stack state!");
1538 FinalizationStack.pop_back();
1539 }
1540 } else {
1541 // emit exit call and do any needed finalization.
1542 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
1543 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
1544 FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
1545 "Unexpected control flow graph state!!");
1546 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
1547 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
1548 "Unexpected Control Flow State!");
1549 MergeBlockIntoPredecessor(FiniBB);
1550 }
1551
1552 // If we are skipping the region of a non-conditional directive, remove the
1553 // exit block, and clear the builder's insertion point.
1554 assert(SplitPos->getParent() == ExitBB &&
1555 "Unexpected Insertion point location!");
1556 if (!Conditional && SkipEmittingRegion) {
1557 ExitBB->eraseFromParent();
1558 Builder.ClearInsertionPoint();
1559 } else {
1560 auto merged = MergeBlockIntoPredecessor(ExitBB);
1561 BasicBlock *ExitPredBB = SplitPos->getParent();
1562 auto InsertBB = merged ? ExitPredBB : ExitBB;
1563 if (!isa_and_nonnull<BranchInst>(SplitPos))
1564 SplitPos->eraseFromParent();
1565 Builder.SetInsertPoint(InsertBB);
1566 }
1567
1568 return Builder.saveIP();
1569}
1570
1571OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
1572 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
1573
1574 // If there is nothing to do, return the current insertion point.
1575 if (!Conditional)
1576 return Builder.saveIP();
1577
1578 BasicBlock *EntryBB = Builder.GetInsertBlock();
1579 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
1580 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
1581 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
1582
1583 // Emit thenBB and set the Builder's insertion point there for
1584 // body generation next. Place the block after the current block.
1585 Function *CurFn = EntryBB->getParent();
1586 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);
1587
1588 // Move Entry branch to end of ThenBB, and replace with conditional
1589 // branch (If-stmt)
1590 Instruction *EntryBBTI = EntryBB->getTerminator();
1591 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
1592 EntryBBTI->removeFromParent();
1593 Builder.SetInsertPoint(UI);
1594 Builder.Insert(EntryBBTI);
1595 UI->eraseFromParent();
1596 Builder.SetInsertPoint(ThenBB->getTerminator());
1597
1598 // return an insertion point to ExitBB.
1599 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
1600}
1601
1602OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
1603 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
1604 bool HasFinalize) {
1605
1606 Builder.restoreIP(FinIP);
1607
1608 // If there is finalization to do, emit it before the exit call
1609 if (HasFinalize) {
1610 assert(!FinalizationStack.empty() &&
1611 "Unexpected finalization stack state!");
1612
1613 FinalizationInfo Fi = FinalizationStack.pop_back_val();
1614 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");
1615
1616 Fi.FiniCB(FinIP);
1617
1618 BasicBlock *FiniBB = FinIP.getBlock();
1619 Instruction *FiniBBTI = FiniBB->getTerminator();
1620
1621 // set Builder IP for call creation
1622 Builder.SetInsertPoint(FiniBBTI);
1623 }
1624
1625 // Place the ExitCall as the last instruction before the finalization block terminator.
1626 ExitCall->removeFromParent();
1627 Builder.Insert(ExitCall);
1628
1629 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
1630 ExitCall->getIterator());
1631}
1632
1633OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
1634 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
1635 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
1636 if (!IP.isSet())
1637 return IP;
1638
1639 IRBuilder<>::InsertPointGuard IPG(Builder);
1640
1641 // creates the following CFG structure
1642 // OMP_Entry : (MasterAddr != PrivateAddr)?
1643 // F T
1644 // | \
1645 // | copyin.not.master
1646 // | /
1647 // v /
1648 // copyin.not.master.end
1649 // |
1650 // v
1651 // OMP.Entry.Next
1652
1653 BasicBlock *OMP_Entry = IP.getBlock();
1654 Function *CurFn = OMP_Entry->getParent();
1655 BasicBlock *CopyBegin =
1656 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
1657 BasicBlock *CopyEnd = nullptr;
1658
1659 // If the entry block is terminated, split it to preserve the branch to the
1660 // following basic block (i.e. OMP.Entry.Next); otherwise, leave everything as is.
1661 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
1662 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
1663 "copyin.not.master.end");
1664 OMP_Entry->getTerminator()->eraseFromParent();
1665 } else {
1666 CopyEnd =
1667 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
1668 }
1669
1670 Builder.SetInsertPoint(OMP_Entry);
1671 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
1672 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
1673 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
1674 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
1675
1676 Builder.SetInsertPoint(CopyBegin);
1677 if (BranchtoEnd)
1678 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
1679
1680 return Builder.saveIP();
1681}
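A minimal sketch of the check the CFG above implements for a copyin clause, written as plain C++ for illustration (the function and variable names are invented): the copy happens only on threads whose threadprivate copy is not the master's copy, i.e. when the two addresses differ.

  #include <cstdio>

  static void copyinIfNeeded(int *MasterAddr, int *PrivateAddr) {
    if (MasterAddr != PrivateAddr) { // OMP_Entry : (MasterAddr != PrivateAddr)?
      *PrivateAddr = *MasterAddr;    // copyin.not.master
    }
    // copyin.not.master.end, then fall through to OMP.Entry.Next
  }

  int main() {
    int Master = 42, Private = 0;
    copyinIfNeeded(&Master, &Private); // different storage: copy happens
    copyinIfNeeded(&Master, &Master);  // same storage (master thread): no copy
    std::printf("%d\n", Private);      // prints 42
    return 0;
  }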
1682
1683CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
1684 Value *Size, Value *Allocator,
1685 std::string Name) {
1686 IRBuilder<>::InsertPointGuard IPG(Builder);
1687 Builder.restoreIP(Loc.IP);
1688
1689 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1690 Value *Ident = getOrCreateIdent(SrcLocStr);
1691 Value *ThreadId = getOrCreateThreadID(Ident);
1692 Value *Args[] = {ThreadId, Size, Allocator};
1693
1694 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
1695
1696 return Builder.CreateCall(Fn, Args, Name);
1697}
1698
1699CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
1700 Value *Addr, Value *Allocator,
1701 std::string Name) {
1702 IRBuilder<>::InsertPointGuard IPG(Builder);
1703 Builder.restoreIP(Loc.IP);
1704
1705 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1706 Value *Ident = getOrCreateIdent(SrcLocStr);
1707 Value *ThreadId = getOrCreateThreadID(Ident);
1708 Value *Args[] = {ThreadId, Addr, Allocator};
1709 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
1710 return Builder.CreateCall(Fn, Args, Name);
1711}
1712
1713CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
1714 const LocationDescription &Loc, llvm::Value *Pointer,
1715 llvm::ConstantInt *Size, const llvm::Twine &Name) {
1716 IRBuilder<>::InsertPointGuard IPG(Builder);
1717 Builder.restoreIP(Loc.IP);
1718
1719 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1720 Value *Ident = getOrCreateIdent(SrcLocStr);
1721 Value *ThreadId = getOrCreateThreadID(Ident);
1722 Constant *ThreadPrivateCache =
1723 getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
1724 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};
1725
1726 Function *Fn =
1727 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);
1728
1729 return Builder.CreateCall(Fn, Args);
1730}
1731
1732std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
1733 StringRef FirstSeparator,
1734 StringRef Separator) {
1735 SmallString<128> Buffer;
1736 llvm::raw_svector_ostream OS(Buffer);
1737 StringRef Sep = FirstSeparator;
1738 for (StringRef Part : Parts) {
1739 OS << Sep << Part;
1740 Sep = Separator;
1741 }
1742 return OS.str().str();
1743}
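A minimal sketch of the string this produces, reimplemented with std::string for illustration (the helper name is invented); the expected output mirrors how getOMPCriticalRegionLock below builds the lock variable name for a critical section named "foo":

  #include <cassert>
  #include <string>
  #include <vector>

  static std::string nameWithSeparators(const std::vector<std::string> &Parts,
                                        const std::string &FirstSeparator,
                                        const std::string &Separator) {
    std::string Out;
    std::string Sep = FirstSeparator;
    for (const std::string &Part : Parts) {
      Out += Sep + Part;
      Sep = Separator;
    }
    return Out;
  }

  int main() {
    assert(nameWithSeparators({"gomp_critical_user_foo", "var"}, ".", ".") ==
           ".gomp_critical_user_foo.var");
    return 0;
  }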
1744
1745Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
1746 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
1747 // TODO: Replace the twine arg with stringref to get rid of the conversion
1748 // logic. However, this is taken from the current clang implementation as is.
1749 // Since this method is used in many places exclusively for OMP internal use,
1750 // we will keep it as is temporarily until we move all users to the
1751 // builder and then, if possible, fix it everywhere in one go.
1752 SmallString<256> Buffer;
1753 llvm::raw_svector_ostream Out(Buffer);
1754 Out << Name;
1755 StringRef RuntimeName = Out.str();
1756 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
1757 if (Elem.second) {
1758 assert(Elem.second->getType()->getPointerElementType() == Ty &&
1759 "OMP internal variable has different type than requested");
1760 } else {
1761 // TODO: Investigate the appropriate linkage type for the global variable,
1762 // possibly changing it to internal or private, or maybe create different
1763 // versions of the function for different OMP internal
1764 // variables.
1765 Elem.second = new llvm::GlobalVariable(
1766 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage,
1767 llvm::Constant::getNullValue(Ty), Elem.first(),
1768 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
1769 AddressSpace);
1770 }
1771
1772 return Elem.second;
1773}
1774
1775Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
1776 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
1777 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
1778 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
1779}
1780
1781 // Create all simple and struct types exposed by the runtime and remember
1782 // their llvm::PointerTypes for easy access later.
1783void OpenMPIRBuilder::initializeTypes(Module &M) {
1784 LLVMContext &Ctx = M.getContext();
1785 StructType *T;
1786#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
1787#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
1788 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \
1789 VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
1790#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
1791 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \
1792 VarName##Ptr = PointerType::getUnqual(VarName);
1793#define OMP_STRUCT_TYPE(VarName, StructName, ...) \
1794 T = StructType::getTypeByName(Ctx, StructName); \
1795 if (!T) \
1796 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \
1797 VarName = T; \
1798 VarName##Ptr = PointerType::getUnqual(T);
1799#include "llvm/Frontend/OpenMP/OMPKinds.def"
1800}
1801
1802void OpenMPIRBuilder::OutlineInfo::collectBlocks(
1803 SmallPtrSetImpl<BasicBlock *> &BlockSet,
1804 SmallVectorImpl<BasicBlock *> &BlockVector) {
1805 SmallVector<BasicBlock *, 32> Worklist;
1806 BlockSet.insert(EntryBB);
1807 BlockSet.insert(ExitBB);
1808
1809 Worklist.push_back(EntryBB);
1810 while (!Worklist.empty()) {
1811 BasicBlock *BB = Worklist.pop_back_val();
1812 BlockVector.push_back(BB);
1813 for (BasicBlock *SuccBB : successors(BB))
1814 if (BlockSet.insert(SuccBB).second)
1815 Worklist.push_back(SuccBB);
1816 }
1817}
1818
1819void CanonicalLoopInfo::collectControlBlocks(
1820 SmallVectorImpl<BasicBlock *> &BBs) {
1821 // We only count those BBs as control blocks for which we do not need to
1822 // reverse the CFG, i.e. not the loop body which can contain arbitrary control
1823 // flow. For consistency, this also means we do not add the Body block, which
1824 // is just the entry to the body code.
1825 BBs.reserve(BBs.size() + 6);
1826 BBs.append({Preheader, Header, Cond, Latch, Exit, After});
1827}
1828
1829void CanonicalLoopInfo::assertOK() const {
1830#ifndef NDEBUG
1831 if (!IsValid)
1832 return;
1833
1834 // Verify standard control-flow we use for OpenMP loops.
1835 assert(Preheader);
1836 assert(isa<BranchInst>(Preheader->getTerminator()) &&
1837 "Preheader must terminate with unconditional branch");
1838 assert(Preheader->getSingleSuccessor() == Header &&
1839 "Preheader must jump to header");
1840
1841 assert(Header);
1842 assert(isa<BranchInst>(Header->getTerminator()) &&
1843 "Header must terminate with unconditional branch");
1844 assert(Header->getSingleSuccessor() == Cond &&
1845 "Header must jump to exiting block");
1846
1847 assert(Cond);
1848 assert(Cond->getSinglePredecessor() == Header &&
1849 "Exiting block only reachable from header");
1850
1851 assert(isa<BranchInst>(Cond->getTerminator()) &&
1852 "Exiting block must terminate with conditional branch");
1853 assert(size(successors(Cond)) == 2 &&
1854 "Exiting block must have two successors");
1855 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&
1856 "Exiting block's first successor jump to the body");
1857 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&
1858 "Exiting block's second successor must exit the loop");
1859
1860 assert(Body);
1861 assert(Body->getSinglePredecessor() == Cond &&
1862 "Body only reachable from exiting block");
1863 assert(!isa<PHINode>(Body->front()));
1864
1865 assert(Latch);
1866 assert(isa<BranchInst>(Latch->getTerminator()) &&
1867 "Latch must terminate with unconditional branch");
1868 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header");
1869 // TODO: To support simple redirecting of the end of the body code that has
1870 // multiple exits, introduce another auxiliary basic block like preheader and after.
1871 assert(Latch->getSinglePredecessor() != nullptr);
1872 assert(!isa<PHINode>(Latch->front()));
1873
1874 assert(Exit);
1875 assert(isa<BranchInst>(Exit->getTerminator()) &&
1876 "Exit block must terminate with unconditional branch");
1877 assert(Exit->getSingleSuccessor() == After &&
1878 "Exit block must jump to after block");
1879
1880 assert(After);
1881 assert(After->getSinglePredecessor() == Exit &&
1882 "After block only reachable from exit block");
1883 assert(After->empty() || !isa<PHINode>(After->front()));
1884
1885 Instruction *IndVar = getIndVar();
1886 assert(IndVar && "Canonical induction variable not found?");
1887 assert(isa<IntegerType>(IndVar->getType()) &&
1888 "Induction variable must be an integer");
1889 assert(cast<PHINode>(IndVar)->getParent() == Header &&
1890 "Induction variable must be a PHI in the loop header");
1891 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader);
1892 assert(
1893 cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero());
1894 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch);
1895
1896 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
1897 assert(cast<Instruction>(NextIndVar)->getParent() == Latch);
1898 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add);
1899 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar);
1900 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))->isOne());
, 1901, __PRETTY_FUNCTION__))
1901 ->isOne())((cast<ConstantInt>(cast<BinaryOperator>(NextIndVar
)->getOperand(1)) ->isOne()) ? static_cast<void> (
0) : __assert_fail ("cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1)) ->isOne()"
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1901, __PRETTY_FUNCTION__))
;
1902
1903 Value *TripCount = getTripCount();
1904 assert(TripCount && "Loop trip count not found?")((TripCount && "Loop trip count not found?") ? static_cast
<void> (0) : __assert_fail ("TripCount && \"Loop trip count not found?\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1904, __PRETTY_FUNCTION__))
;
1905 assert(IndVar->getType() == TripCount->getType() &&((IndVar->getType() == TripCount->getType() && "Trip count and induction variable must have the same type"
) ? static_cast<void> (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1906, __PRETTY_FUNCTION__))
1906 "Trip count and induction variable must have the same type")((IndVar->getType() == TripCount->getType() && "Trip count and induction variable must have the same type"
) ? static_cast<void> (0) : __assert_fail ("IndVar->getType() == TripCount->getType() && \"Trip count and induction variable must have the same type\""
, "/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp"
, 1906, __PRETTY_FUNCTION__))
;
1907
1908 auto *CmpI = cast<CmpInst>(&Cond->front());
1909 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&
1910 "Exit condition must be an unsigned less-than comparison");
1911 assert(CmpI->getOperand(0) == IndVar &&
1912 "Exit condition must compare the induction variable");
1913 assert(CmpI->getOperand(1) == TripCount &&
1914 "Exit condition must compare with the trip count");
1915#endif
1916}
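The assertions above spell out the canonical loop shape: an induction-variable PHI in the header that starts at zero, is incremented by one in the latch, and is compared unsigned against the trip count in the condition block. The following is an illustrative sketch only (not code from this file) of how such a pattern is typically built with an IRBuilder positioned in the corresponding blocks; Builder, Preheader, Latch and TripCount are assumed names.

// Sketch, assuming Builder is repositioned into Header/Cond/Latch as noted.
PHINode *IV = Builder.CreatePHI(TripCount->getType(), 2, "iv");          // in Header
IV->addIncoming(ConstantInt::get(TripCount->getType(), 0), Preheader);   // starts at 0
Value *Cmp = Builder.CreateICmpULT(IV, TripCount, "cmp");                // in Cond
Value *NextIV = Builder.CreateAdd(IV, ConstantInt::get(IV->getType(), 1),
                                  "iv.next");                            // in Latch
IV->addIncoming(NextIV, Latch);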

/build/llvm-toolchain-snapshot-12~++20210124100612+2afaf072f5c1/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update an entry (Kind, MD) in MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V));
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
(5) Null pointer value stored to field 'BB'
175 InsertPt = BasicBlock::iterator();
176 }
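Note: once ClearInsertionPoint() has run, the builder has no block, so any Insert() reached before the insertion point is re-established passes a null BB to the inserter (see InsertHelper above). A defensive sketch for callers; Builder, SomeBB, LHS and RHS are assumed names, not part of this header.

// Sketch only: guard against emitting IR while the insertion point is cleared.
if (!Builder.GetInsertBlock())
  Builder.SetInsertPoint(SomeBB);          // re-establish a valid block first
Value *Sum = Builder.CreateAdd(LHS, RHS);  // now inserted into a real block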
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MetadataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
(3) Taking false branch
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
(4) Calling 'IRBuilderBase::ClearInsertionPoint'
(6) Returning from 'IRBuilderBase::ClearInsertionPoint'
288 }
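A short usage sketch (Builder and OtherBB are assumed names): save the insertion point, redirect it, then restore it. Restoring a default-constructed (unset) InsertPoint takes the ClearInsertionPoint() path annotated above.

// Sketch only.
IRBuilderBase::InsertPoint Saved = Builder.saveIP();
Builder.SetInsertPoint(OtherBB);     // emit into OtherBB for a while
Builder.CreateUnreachable();
Builder.restoreIP(Saved);            // if Saved.isSet() were false, this would clear the IP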
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!");
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
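Sketch of typical RAII use (Builder and ScratchBB are assumed names). If the builder had no insertion point when the guard was constructed, Block is null and the destructor's restoreIP() call ends up in ClearInsertionPoint(), which is the path annotated above.

// Sketch only.
{
  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(ScratchBB);   // temporarily emit elsewhere
  Builder.CreateRetVoid();             // illustrative temporary IR
} // previous insertion point and debug location restored here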
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable whose initializer is an array of i8
442 /// filled in with the null-terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is taken from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
497
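A brief sketch of the constant helpers above; Builder is an assumed live IRBuilder.

ConstantInt *C1  = Builder.getInt1(true);   // i1 true
ConstantInt *C32 = Builder.getInt32(42);    // i32 42
ConstantInt *C24 = Builder.getIntN(24, 7);  // i24 7, zero extended/truncated from 64 bits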
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
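A hedged sketch of the convenience overload above (Builder and Ptr are assumed names): zero out 64 bytes at Ptr with a known 16-byte alignment.

Builder.CreateMemSet(Ptr, Builder.getInt8(0), uint64_t(64), MaybeAlign(16));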
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
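Sketch of the memcpy helper above (Builder, Dst and Src are assumed names); the MaybeAlign arguments carry the known alignments, if any.

// Copies 128 bytes from Src (4-byte aligned) to Dst (8-byte aligned).
Builder.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(4),
                     Builder.getInt64(128));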
649
650 CallInst *CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
651 MaybeAlign SrcAlign, Value *Size);
652
653 /// Create and insert an element unordered-atomic memcpy between the
654 /// specified pointers.
655 ///
656 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
657 ///
658 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
659 /// specified, it will be added to the instruction. Likewise with alias.scope
660 /// and noalias tags.
661 CallInst *CreateElementUnorderedAtomicMemCpy(
662 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
663 uint32_t ElementSize, MDNode *TBAATag = nullptr,
664 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
665 MDNode *NoAliasTag = nullptr);
666
667 LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemCpy(
668 Value *Dst, unsigned DstAlign, Value *Src,
669 unsigned SrcAlign, uint64_t Size,
670 uint32_t ElementSize, MDNode *TBAATag = nullptr,
671 MDNode *TBAAStructTag = nullptr,
672 MDNode *ScopeTag = nullptr,
673 MDNode *NoAliasTag = nullptr),
674 "Use the version that takes Align instead") {
675 return CreateElementUnorderedAtomicMemCpy(
676 Dst, Align(DstAlign), Src, Align(SrcAlign), getInt64(Size), ElementSize,
677 TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
678 }
679
680 LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemCpy(
681 Value *Dst, unsigned DstAlign, Value *Src,
682 unsigned SrcAlign, Value *Size,
683 uint32_t ElementSize, MDNode *TBAATag = nullptr,
684 MDNode *TBAAStructTag = nullptr,
685 MDNode *ScopeTag = nullptr,
686 MDNode *NoAliasTag = nullptr),
687 "Use the version that takes Align instead") {
688 return CreateElementUnorderedAtomicMemCpy(
689 Dst, Align(DstAlign), Src, Align(SrcAlign), Size, ElementSize, TBAATag,
690 TBAAStructTag, ScopeTag, NoAliasTag);
691 }
692
693 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
694 MaybeAlign SrcAlign, uint64_t Size,
695 bool isVolatile = false, MDNode *TBAATag = nullptr,
696 MDNode *ScopeTag = nullptr,
697 MDNode *NoAliasTag = nullptr) {
698 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
699 isVolatile, TBAATag, ScopeTag, NoAliasTag);
700 }
701
702 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
703 MaybeAlign SrcAlign, Value *Size,
704 bool isVolatile = false, MDNode *TBAATag = nullptr,
705 MDNode *ScopeTag = nullptr,
706 MDNode *NoAliasTag = nullptr);
707
708 /// \brief Create and insert an element unordered-atomic memmove between the
709 /// specified pointers.
710 ///
711 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
712 /// respectively.
713 ///
714 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
715 /// specified, it will be added to the instruction. Likewise with alias.scope
716 /// and noalias tags.
717 CallInst *CreateElementUnorderedAtomicMemMove(
718 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
719 uint32_t ElementSize, MDNode *TBAATag = nullptr,
720 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
721 MDNode *NoAliasTag = nullptr);
722
723 LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemMove(
724 Value *Dst, unsigned DstAlign, Value *Src,
725 unsigned SrcAlign, uint64_t Size,
726 uint32_t ElementSize, MDNode *TBAATag = nullptr,
727 MDNode *TBAAStructTag = nullptr,
728 MDNode *ScopeTag = nullptr,
729 MDNode *NoAliasTag = nullptr),
730 "Use the version that takes Align instead") {
731 return CreateElementUnorderedAtomicMemMove(
732 Dst, Align(DstAlign), Src, Align(SrcAlign), getInt64(Size), ElementSize,
733 TBAATag, TBAAStructTag, ScopeTag, NoAliasTag);
734 }
735
736 LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateElementUnorderedAtomicMemMove(
737 Value *Dst, unsigned DstAlign, Value *Src,
738 unsigned SrcAlign, Value *Size,
739 uint32_t ElementSize, MDNode *TBAATag = nullptr,
740 MDNode *TBAAStructTag = nullptr,
741 MDNode *ScopeTag = nullptr,
742 MDNode *NoAliasTag = nullptr),
743 "Use the version that takes Align instead") {
744 return CreateElementUnorderedAtomicMemMove(
745 Dst, Align(DstAlign), Src, Align(SrcAlign), Size, ElementSize, TBAATag,
746 TBAAStructTag, ScopeTag, NoAliasTag);
747 }
748
749 /// Create a vector fadd reduction intrinsic of the source vector.
750 /// The first parameter is a scalar accumulator value for ordered reductions.
751 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
752
753 /// Create a vector fmul reduction intrinsic of the source vector.
754 /// The first parameter is a scalar accumulator value for ordered reductions.
755 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
756
757 /// Create a vector int add reduction intrinsic of the source vector.
758 CallInst *CreateAddReduce(Value *Src);
759
760 /// Create a vector int mul reduction intrinsic of the source vector.
761 CallInst *CreateMulReduce(Value *Src);
762
763 /// Create a vector int AND reduction intrinsic of the source vector.
764 CallInst *CreateAndReduce(Value *Src);
765
766 /// Create a vector int OR reduction intrinsic of the source vector.
767 CallInst *CreateOrReduce(Value *Src);
768
769 /// Create a vector int XOR reduction intrinsic of the source vector.
770 CallInst *CreateXorReduce(Value *Src);
771
772 /// Create a vector integer max reduction intrinsic of the source
773 /// vector.
774 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
775
776 /// Create a vector integer min reduction intrinsic of the source
777 /// vector.
778 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
779
780 /// Create a vector float max reduction intrinsic of the source
781 /// vector.
782 CallInst *CreateFPMaxReduce(Value *Src);
783
784 /// Create a vector float min reduction intrinsic of the source
785 /// vector.
786 CallInst *CreateFPMinReduce(Value *Src);
787
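Sketch of the reduction creators above; Builder, Vec and FVec are assumed vector values, and Acc an assumed scalar float accumulator.

Value *Sum  = Builder.CreateAddReduce(Vec);         // integer add reduction
Value *FSum = Builder.CreateFAddReduce(Acc, FVec);  // ordered float add reduction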
788 /// Create a lifetime.start intrinsic.
789 ///
790 /// If the pointer isn't i8* it will be converted.
791 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
792
793 /// Create a lifetime.end intrinsic.
794 ///
795 /// If the pointer isn't i8* it will be converted.
796 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
797
798 /// Create a call to invariant.start intrinsic.
799 ///
800 /// If the pointer isn't i8* it will be converted.
801 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
802
803 /// Create a call to Masked Load intrinsic
804 LLVM_ATTRIBUTE_DEPRECATED(
805 CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
806 Value *PassThru = nullptr,
807 const Twine &Name = ""),
808 "Use the version that takes Align instead") {
809 return CreateMaskedLoad(Ptr, assumeAligned(Alignment), Mask, PassThru,
810 Name);
811 }
812 CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
813 Value *PassThru = nullptr, const Twine &Name = "");
814
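Sketch of the non-deprecated masked load above; Builder, Ptr, Mask and PassThru are assumed names, with Ptr pointing at a vector in memory.

CallInst *Ld = Builder.CreateMaskedLoad(Ptr, Align(16), Mask, PassThru, "mload");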
815 /// Create a call to Masked Store intrinsic
816 LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateMaskedStore(Value *Val, Value *Ptr,
817 unsigned Alignment,
818 Value *Mask),
819 "Use the version that takes Align instead") {
820 return CreateMaskedStore(Val, Ptr, assumeAligned(Alignment), Mask);
821 }
822
823 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
824 Value *Mask);
825
826 /// Create a call to Masked Gather intrinsic
827 LLVM_ATTRIBUTE_DEPRECATED(
828 CallInst *CreateMaskedGather(Value *Ptrs, unsigned Alignment,
829 Value *Mask = nullptr,
830 Value *PassThru = nullptr,
831 const Twine &Name = ""),
832 "Use the version that takes Align instead") {
833 return CreateMaskedGather(Ptrs, Align(Alignment), Mask, PassThru, Name);
834 }
835
836 /// Create a call to Masked Gather intrinsic
837 CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
838 Value *Mask = nullptr, Value *PassThru = nullptr,
839 const Twine &Name = "");
840
841 /// Create a call to Masked Scatter intrinsic
842 LLVM_ATTRIBUTE_DEPRECATED(
843 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Alignment,
844 Value *Mask = nullptr),
845 "Use the version that takes Align instead") {
846 return CreateMaskedScatter(Val, Ptrs, Align(Alignment), Mask);
847 }
848
849 /// Create a call to Masked Scatter intrinsic
850 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
851 Value *Mask = nullptr);
852
853 /// Create an assume intrinsic call that allows the optimizer to
854 /// assume that the provided condition will be true.
855 ///
856 /// The optional argument \p OpBundles specifies operand bundles that are
857 /// added to the call instruction.
858 CallInst *CreateAssumption(Value *Cond,
859 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
860
861 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
862 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
863 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
864 return CreateNoAliasScopeDeclaration(
865 MetadataAsValue::get(Context, ScopeTag));
866 }
867
868 /// Create a call to the experimental.gc.statepoint intrinsic to
869 /// start a new statepoint sequence.
870 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
871 Value *ActualCallee,
872 ArrayRef<Value *> CallArgs,
873 Optional<ArrayRef<Value *>> DeoptArgs,
874 ArrayRef<Value *> GCArgs,
875 const Twine &Name = "");
876
877 /// Create a call to the experimental.gc.statepoint intrinsic to
878 /// start a new statepoint sequence.
879 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
880 Value *ActualCallee, uint32_t Flags,
881 ArrayRef<Value *> CallArgs,
882 Optional<ArrayRef<Use>> TransitionArgs,
883 Optional<ArrayRef<Use>> DeoptArgs,
884 ArrayRef<Value *> GCArgs,
885 const Twine &Name = "");
886
887 /// Convenience function for the common case when CallArgs are filled
888 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
889 /// .get()'ed to get the Value pointer.
890 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
891 Value *ActualCallee, ArrayRef<Use> CallArgs,
892 Optional<ArrayRef<Value *>> DeoptArgs,
893 ArrayRef<Value *> GCArgs,
894 const Twine &Name = "");
895
896 /// Create an invoke to the experimental.gc.statepoint intrinsic to
897 /// start a new statepoint sequence.
898 InvokeInst *
899 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
900 Value *ActualInvokee, BasicBlock *NormalDest,
901 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
902 Optional<ArrayRef<Value *>> DeoptArgs,
903 ArrayRef<Value *> GCArgs, const Twine &Name = "");
904
905 /// Create an invoke to the experimental.gc.statepoint intrinsic to
906 /// start a new statepoint sequence.
907 InvokeInst *CreateGCStatepointInvoke(
908 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
909 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
910 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
911 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
912 const Twine &Name = "");
913
914 // Convenience function for the common case when CallArgs are filled in using
915 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
916 // get the Value *.
917 InvokeInst *
918 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
919 Value *ActualInvokee, BasicBlock *NormalDest,
920 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
921 Optional<ArrayRef<Value *>> DeoptArgs,
922 ArrayRef<Value *> GCArgs, const Twine &Name = "");
923
924 /// Create a call to the experimental.gc.result intrinsic to extract
925 /// the result from a call wrapped in a statepoint.
926 CallInst *CreateGCResult(Instruction *Statepoint,
927 Type *ResultType,
928 const Twine &Name = "");
929
930 /// Create a call to the experimental.gc.relocate intrinsics to
931 /// project the relocated value of one pointer from the statepoint.
932 CallInst *CreateGCRelocate(Instruction *Statepoint,
933 int BaseOffset,
934 int DerivedOffset,
935 Type *ResultType,
936 const Twine &Name = "");
937
938 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
939 /// will be the same type as that of \p Scaling.
940 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
941
942 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
943 /// type.
944 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
945 Instruction *FMFSource = nullptr,
946 const Twine &Name = "");
947
948 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
949 /// first type.
950 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
951 Instruction *FMFSource = nullptr,
952 const Twine &Name = "");
953
954 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
955 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
956 /// the intrinsic.
957 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
958 ArrayRef<Value *> Args,
959 Instruction *FMFSource = nullptr,
960 const Twine &Name = "");
961
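A sketch of the intrinsic helpers above (assuming llvm/IR/Intrinsics.h and a builder with a valid insertion point; the wrapper is hypothetical):

static Value *emitSqrt(IRBuilder<> &B, Value *X) {
  // llvm.sqrt, mangled on the type of X; with no FMFSource the builder's
  // current fast-math flags apply.
  return B.CreateUnaryIntrinsic(Intrinsic::sqrt, X, /*FMFSource=*/nullptr,
                                "sqrtx");
}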
962 /// Create call to the minnum intrinsic.
963 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
964 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
965 }
966
967 /// Create call to the maxnum intrinsic.
968 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
969 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
970 }
971
972 /// Create call to the minimum intrinsic.
973 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
974 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
975 }
976
977 /// Create call to the maximum intrinsic.
978 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
979 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
980 }
981
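The min/max wrappers compose naturally; a hypothetical clamp helper built only from the calls declared above:

static Value *emitClamp(IRBuilder<> &B, Value *X, Value *Lo, Value *Hi) {
  // clamp(x, lo, hi) = minnum(maxnum(x, lo), hi) for floating-point operands.
  return B.CreateMinNum(B.CreateMaxNum(X, Lo), Hi, "clamp");
}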
982 /// Create a call to the experimental.vector.extract intrinsic.
983 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
984 const Twine &Name = "") {
985 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
986 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
987 Name);
988 }
989
990 /// Create a call to the experimental.vector.insert intrinsic.
991 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
992 Value *Idx, const Twine &Name = "") {
993 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
994 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
995 nullptr, Name);
996 }
997
998private:
999 /// Create a call to a masked intrinsic with given Id.
1000 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
1001 ArrayRef<Type *> OverloadedTypes,
1002 const Twine &Name = "");
1003
1004 Value *getCastedInt8PtrValue(Value *Ptr);
1005
1006 //===--------------------------------------------------------------------===//
1007 // Instruction creation methods: Terminators
1008 //===--------------------------------------------------------------------===//
1009
1010private:
1011 /// Helper to add branch weight and unpredictable metadata onto an
1012 /// instruction.
1013 /// \returns The annotated instruction.
1014 template <typename InstTy>
1015 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
1016 if (Weights)
1017 I->setMetadata(LLVMContext::MD_prof, Weights);
1018 if (Unpredictable)
1019 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
1020 return I;
1021 }
1022
1023public:
1024 /// Create a 'ret void' instruction.
1025 ReturnInst *CreateRetVoid() {
1026 return Insert(ReturnInst::Create(Context));
1027 }
1028
1029 /// Create a 'ret <val>' instruction.
1030 ReturnInst *CreateRet(Value *V) {
1031 return Insert(ReturnInst::Create(Context, V));
1032 }
1033
1034 /// Create a sequence of N insertvalue instructions,
1035 /// with one Value from the retVals array each, that build an aggregate
1036 /// return value one value at a time, and a ret instruction to return
1037 /// the resulting aggregate value.
1038 ///
1039 /// This is a convenience function for code that uses aggregate return values
1040 /// as a vehicle for having multiple return values.
1041 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
1042 Value *V = UndefValue::get(getCurrentFunctionReturnType());
1043 for (unsigned i = 0; i != N; ++i)
1044 V = CreateInsertValue(V, retVals[i], i, "mrv");
1045 return Insert(ReturnInst::Create(Context, V));
1046 }
1047
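Sketch of the aggregate-return convenience (hypothetical helper; assumes the current function's return type is a struct with at least two elements):

static ReturnInst *emitPairReturn(IRBuilder<> &B, Value *First, Value *Second) {
  // Chains two insertvalue instructions into an undef of the function's
  // return type, then emits the ret of the resulting aggregate.
  Value *Vals[] = {First, Second};
  return B.CreateAggregateRet(Vals, 2);
}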
1048 /// Create an unconditional 'br label X' instruction.
1049 BranchInst *CreateBr(BasicBlock *Dest) {
1050 return Insert(BranchInst::Create(Dest));
1051 }
1052
1053 /// Create a conditional 'br Cond, TrueDest, FalseDest'
1054 /// instruction.
1055 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1056 MDNode *BranchWeights = nullptr,
1057 MDNode *Unpredictable = nullptr) {
1058 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
1059 BranchWeights, Unpredictable));
1060 }
1061
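Sketch of attaching branch-weight metadata through the MDNode parameter (assumes llvm/IR/MDBuilder.h; the helper name is hypothetical):

static BranchInst *emitLikelyBranch(IRBuilder<> &B, Value *Cond,
                                    BasicBlock *ThenBB, BasicBlock *ElseBB) {
  // 95:5 profile weights mark the true edge as the hot one.
  MDNode *Weights = MDBuilder(B.getContext()).createBranchWeights(95, 5);
  return B.CreateCondBr(Cond, ThenBB, ElseBB, Weights);
}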
1062 /// Create a conditional 'br Cond, TrueDest, FalseDest'
1063 /// instruction. Copy branch meta data if available.
1064 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1065 Instruction *MDSrc) {
1066 BranchInst *Br = BranchInst::Create(True, False, Cond);
1067 if (MDSrc) {
1068 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1069 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1070 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1071 }
1072 return Insert(Br);
1073 }
1074
1075 /// Create a switch instruction with the specified value, default dest,
1076 /// and with a hint for the number of cases that will be added (for efficient
1077 /// allocation).
1078 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1079 MDNode *BranchWeights = nullptr,
1080 MDNode *Unpredictable = nullptr) {
1081 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1082 BranchWeights, Unpredictable));
1083 }
1084
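Sketch of the switch builder (hypothetical helper; V is assumed to be an integer value):

static SwitchInst *emitTwoCaseSwitch(IRBuilder<> &B, Value *V,
                                     BasicBlock *DefaultBB,
                                     BasicBlock *Case0BB, BasicBlock *Case1BB) {
  // NumCases is only an allocation hint; cases are added afterwards.
  SwitchInst *SI = B.CreateSwitch(V, DefaultBB, /*NumCases=*/2);
  IntegerType *ITy = cast<IntegerType>(V->getType());
  SI->addCase(ConstantInt::get(ITy, 0), Case0BB);
  SI->addCase(ConstantInt::get(ITy, 1), Case1BB);
  return SI;
}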
1085 /// Create an indirect branch instruction with the specified address
1086 /// operand, with an optional hint for the number of destinations that will be
1087 /// added (for efficient allocation).
1088 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1089 return Insert(IndirectBrInst::Create(Addr, NumDests));
1090 }
1091
1092 /// Create an invoke instruction.
1093 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1094 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1095 ArrayRef<Value *> Args,
1096 ArrayRef<OperandBundleDef> OpBundles,
1097 const Twine &Name = "") {
1098 InvokeInst *II =
1099 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1100 if (IsFPConstrained)
1101 setConstrainedFPCallAttr(II);
1102 return Insert(II, Name);
1103 }
1104 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1105 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1106 ArrayRef<Value *> Args = None,
1107 const Twine &Name = "") {
1108 InvokeInst *II =
1109 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1110 if (IsFPConstrained)
1111 setConstrainedFPCallAttr(II);
1112 return Insert(II, Name);
1113 }
1114
1115 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1116 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1117 ArrayRef<OperandBundleDef> OpBundles,
1118 const Twine &Name = "") {
1119 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1120 NormalDest, UnwindDest, Args, OpBundles, Name);
1121 }
1122
1123 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1124 BasicBlock *UnwindDest,
1125 ArrayRef<Value *> Args = None,
1126 const Twine &Name = "") {
1127 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1128 NormalDest, UnwindDest, Args, Name);
1129 }
1130
1131 /// \brief Create a callbr instruction.
1132 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1133 BasicBlock *DefaultDest,
1134 ArrayRef<BasicBlock *> IndirectDests,
1135 ArrayRef<Value *> Args = None,
1136 const Twine &Name = "") {
1137 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1138 Args), Name);
1139 }
1140 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1141 BasicBlock *DefaultDest,
1142 ArrayRef<BasicBlock *> IndirectDests,
1143 ArrayRef<Value *> Args,
1144 ArrayRef<OperandBundleDef> OpBundles,
1145 const Twine &Name = "") {
1146 return Insert(
1147 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1148 OpBundles), Name);
1149 }
1150
1151 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1152 ArrayRef<BasicBlock *> IndirectDests,
1153 ArrayRef<Value *> Args = None,
1154 const Twine &Name = "") {
1155 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1156 DefaultDest, IndirectDests, Args, Name);
1157 }
1158 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1159 ArrayRef<BasicBlock *> IndirectDests,
1160 ArrayRef<Value *> Args,
1161 ArrayRef<OperandBundleDef> OpBundles,
1162 const Twine &Name = "") {
1163 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1164 DefaultDest, IndirectDests, Args, Name);
1165 }
1166
1167 ResumeInst *CreateResume(Value *Exn) {
1168 return Insert(ResumeInst::Create(Exn));
1169 }
1170
1171 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1172 BasicBlock *UnwindBB = nullptr) {
1173 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1174 }
1175
1176 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1177 unsigned NumHandlers,
1178 const Twine &Name = "") {
1179 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1180 Name);
1181 }
1182
1183 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1184 const Twine &Name = "") {
1185 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1186 }
1187
1188 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1189 ArrayRef<Value *> Args = None,
1190 const Twine &Name = "") {
1191 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1192 }
1193
1194 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1195 return Insert(CatchReturnInst::Create(CatchPad, BB));
1196 }
1197
1198 UnreachableInst *CreateUnreachable() {
1199 return Insert(new UnreachableInst(Context));
1200 }
1201
1202 //===--------------------------------------------------------------------===//
1203 // Instruction creation methods: Binary Operators
1204 //===--------------------------------------------------------------------===//
1205private:
1206 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1207 Value *LHS, Value *RHS,
1208 const Twine &Name,
1209 bool HasNUW, bool HasNSW) {
1210 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1211 if (HasNUW) BO->setHasNoUnsignedWrap();
1212 if (HasNSW) BO->setHasNoSignedWrap();
1213 return BO;
1214 }
1215
1216 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1217 FastMathFlags FMF) const {
1218 if (!FPMD)
1219 FPMD = DefaultFPMathTag;
1220 if (FPMD)
1221 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1222 I->setFastMathFlags(FMF);
1223 return I;
1224 }
1225
1226 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1227 Value *R, const Twine &Name) const {
1228 auto *LC = dyn_cast<Constant>(L);
1229 auto *RC = dyn_cast<Constant>(R);
1230 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1231 }
1232
1233 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1234 RoundingMode UseRounding = DefaultConstrainedRounding;
1235
1236 if (Rounding.hasValue())
1237 UseRounding = Rounding.getValue();
1238
1239 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1240 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1241 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1242
1243 return MetadataAsValue::get(Context, RoundingMDS);
1244 }
1245
1246 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1247 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1248
1249 if (Except.hasValue())
1250 UseExcept = Except.getValue();
1251
1252 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1253 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1254 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1255
1256 return MetadataAsValue::get(Context, ExceptMDS);
1257 }
1258
1259 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1260 assert(CmpInst::isFPPredicate(Predicate) &&
1261 Predicate != CmpInst::FCMP_FALSE &&
1262 Predicate != CmpInst::FCMP_TRUE &&
1263 "Invalid constrained FP comparison predicate!");
1264
1265 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1266 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1267
1268 return MetadataAsValue::get(Context, PredicateMDS);
1269 }
1270
1271public:
1272 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1273 bool HasNUW = false, bool HasNSW = false) {
1274 if (auto *LC = dyn_cast<Constant>(LHS))
1275 if (auto *RC = dyn_cast<Constant>(RHS))
1276 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1277 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1278 HasNUW, HasNSW);
1279 }
1280
1281 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1282 return CreateAdd(LHS, RHS, Name, false, true);
1283 }
1284
1285 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1286 return CreateAdd(LHS, RHS, Name, true, false);
1287 }
1288
1289 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1290 bool HasNUW = false, bool HasNSW = false) {
1291 if (auto *LC = dyn_cast<Constant>(LHS))
1292 if (auto *RC = dyn_cast<Constant>(RHS))
1293 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1294 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1295 HasNUW, HasNSW);
1296 }
1297
1298 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1299 return CreateSub(LHS, RHS, Name, false, true);
1300 }
1301
1302 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1303 return CreateSub(LHS, RHS, Name, true, false);
1304 }
1305
1306 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1307 bool HasNUW = false, bool HasNSW = false) {
1308 if (auto *LC = dyn_cast<Constant>(LHS))
1309 if (auto *RC = dyn_cast<Constant>(RHS))
1310 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1311 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1312 HasNUW, HasNSW);
1313 }
1314
1315 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1316 return CreateMul(LHS, RHS, Name, false, true);
1317 }
1318
1319 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1320 return CreateMul(LHS, RHS, Name, true, false);
1321 }
1322
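Sketch of the wrap-flag variants (hypothetical helper): constant operands are folded by the Folder before any instruction is created, otherwise nuw/nsw are set on the emitted binop.

static Value *emitScaledIndex(IRBuilder<> &B, Value *Idx, uint64_t Stride) {
  Value *StrideC = ConstantInt::get(Idx->getType(), Stride);
  Value *Scaled = B.CreateNUWMul(Idx, StrideC, "scaled");
  return B.CreateNUWAdd(Scaled, ConstantInt::get(Idx->getType(), 1), "idx");
}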
1323 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1324 bool isExact = false) {
1325 if (auto *LC = dyn_cast<Constant>(LHS))
1326 if (auto *RC = dyn_cast<Constant>(RHS))
1327 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1328 if (!isExact)
1329 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1330 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1331 }
1332
1333 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1334 return CreateUDiv(LHS, RHS, Name, true);
1335 }
1336
1337 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1338 bool isExact = false) {
1339 if (auto *LC = dyn_cast<Constant>(LHS))
1340 if (auto *RC = dyn_cast<Constant>(RHS))
1341 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1342 if (!isExact)
1343 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1344 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1345 }
1346
1347 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1348 return CreateSDiv(LHS, RHS, Name, true);
1349 }
1350
1351 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1352 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1353 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1354 }
1355
1356 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1357 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1358 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1359 }
1360
1361 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1362 bool HasNUW = false, bool HasNSW = false) {
1363 if (auto *LC = dyn_cast<Constant>(LHS))
1364 if (auto *RC = dyn_cast<Constant>(RHS))
1365 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1366 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1367 HasNUW, HasNSW);
1368 }
1369
1370 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1371 bool HasNUW = false, bool HasNSW = false) {
1372 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1373 HasNUW, HasNSW);
1374 }
1375
1376 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1377 bool HasNUW = false, bool HasNSW = false) {
1378 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1379 HasNUW, HasNSW);
1380 }
1381
1382 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1383 bool isExact = false) {
1384 if (auto *LC = dyn_cast<Constant>(LHS))
1385 if (auto *RC = dyn_cast<Constant>(RHS))
1386 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1387 if (!isExact)
1388 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1389 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1390 }
1391
1392 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1393 bool isExact = false) {
1394 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1395 }
1396
1397 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1398 bool isExact = false) {
1399 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1400 }
1401
1402 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1403 bool isExact = false) {
1404 if (auto *LC = dyn_cast<Constant>(LHS))
1405 if (auto *RC = dyn_cast<Constant>(RHS))
1406 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1407 if (!isExact)
1408 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1409 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1410 }
1411
1412 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1413 bool isExact = false) {
1414 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1415 }
1416
1417 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1418 bool isExact = false) {
1419 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1420 }
1421
1422 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1423 if (auto *RC = dyn_cast<Constant>(RHS)) {
1424 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1425 return LHS; // LHS & -1 -> LHS
1426 if (auto *LC = dyn_cast<Constant>(LHS))
1427 return Insert(Folder.CreateAnd(LC, RC), Name);
1428 }
1429 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1430 }
1431
1432 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1433 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1434 }
1435
1436 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1437 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1438 }
1439
1440 Value *CreateAnd(ArrayRef<Value*> Ops) {
1441 assert(!Ops.empty());
1442 Value *Accum = Ops[0];
1443 for (unsigned i = 1; i < Ops.size(); i++)
1444 Accum = CreateAnd(Accum, Ops[i]);
1445 return Accum;
1446 }
1447
1448 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1449 if (auto *RC = dyn_cast<Constant>(RHS)) {
1450 if (RC->isNullValue())
1451 return LHS; // LHS | 0 -> LHS
1452 if (auto *LC = dyn_cast<Constant>(LHS))
1453 return Insert(Folder.CreateOr(LC, RC), Name);
1454 }
1455 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1456 }
1457
1458 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1459 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1460 }
1461
1462 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1463 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1464 }
1465
1466 Value *CreateOr(ArrayRef<Value*> Ops) {
1467 assert(!Ops.empty());
1468 Value *Accum = Ops[0];
1469 for (unsigned i = 1; i < Ops.size(); i++)
1470 Accum = CreateOr(Accum, Ops[i]);
1471 return Accum;
1472 }
1473
1474 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1475 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1476 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1477 }
1478
1479 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1480 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1481 }
1482
1483 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1484 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1485 }
1486
1487 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1488 MDNode *FPMD = nullptr) {
1489 if (IsFPConstrained)
1490 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1491 L, R, nullptr, Name, FPMD);
1492
1493 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1494 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1495 return Insert(I, Name);
1496 }
1497
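Sketch of the constrained path (hypothetical helper): once IsFPConstrained is set on the builder, the same CreateFAdd call lowers to the constrained intrinsic instead of a plain fadd. Note the flag change persists on the builder.

static Value *emitStrictFAdd(IRBuilder<> &B, Value *L, Value *R) {
  B.setIsFPConstrained(true);
  // Emits llvm.experimental.constrained.fadd with the builder's default
  // rounding mode and exception behavior.
  return B.CreateFAdd(L, R, "sum");
}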
1498 /// Copy fast-math-flags from an instruction rather than using the builder's
1499 /// default FMF.
1500 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1501 const Twine &Name = "") {
1502 if (IsFPConstrained)
1503 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1504 L, R, FMFSource, Name);
1505
1506 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1507 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1508 FMFSource->getFastMathFlags());
1509 return Insert(I, Name);
1510 }
1511
1512 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1513 MDNode *FPMD = nullptr) {
1514 if (IsFPConstrained)
1515 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1516 L, R, nullptr, Name, FPMD);
1517
1518 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1519 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1520 return Insert(I, Name);
1521 }
1522
1523 /// Copy fast-math-flags from an instruction rather than using the builder's
1524 /// default FMF.
1525 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1526 const Twine &Name = "") {
1527 if (IsFPConstrained)
1528 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1529 L, R, FMFSource, Name);
1530
1531 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1532 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1533 FMFSource->getFastMathFlags());
1534 return Insert(I, Name);
1535 }
1536
1537 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1538 MDNode *FPMD = nullptr) {
1539 if (IsFPConstrained)
1540 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1541 L, R, nullptr, Name, FPMD);
1542
1543 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1544 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1545 return Insert(I, Name);
1546 }
1547
1548 /// Copy fast-math-flags from an instruction rather than using the builder's
1549 /// default FMF.
1550 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1551 const Twine &Name = "") {
1552 if (IsFPConstrained)
1553 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1554 L, R, FMFSource, Name);
1555
1556 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1557 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1558 FMFSource->getFastMathFlags());
1559 return Insert(I, Name);
1560 }
1561
1562 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1563 MDNode *FPMD = nullptr) {
1564 if (IsFPConstrained)
1565 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1566 L, R, nullptr, Name, FPMD);
1567
1568 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1569 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1570 return Insert(I, Name);
1571 }
1572
1573 /// Copy fast-math-flags from an instruction rather than using the builder's
1574 /// default FMF.
1575 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1576 const Twine &Name = "") {
1577 if (IsFPConstrained)
1578 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1579 L, R, FMFSource, Name);
1580
1581 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1582 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1583 FMFSource->getFastMathFlags());
1584 return Insert(I, Name);
1585 }
1586
1587 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1588 MDNode *FPMD = nullptr) {
1589 if (IsFPConstrained)
1590 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1591 L, R, nullptr, Name, FPMD);
1592
1593 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1594 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1595 return Insert(I, Name);
1596 }
1597
1598 /// Copy fast-math-flags from an instruction rather than using the builder's
1599 /// default FMF.
1600 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1601 const Twine &Name = "") {
1602 if (IsFPConstrained)
1603 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1604 L, R, FMFSource, Name);
1605
1606 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1607 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1608 FMFSource->getFastMathFlags());
1609 return Insert(I, Name);
1610 }
1611
1612 Value *CreateBinOp(Instruction::BinaryOps Opc,
1613 Value *LHS, Value *RHS, const Twine &Name = "",
1614 MDNode *FPMathTag = nullptr) {
1615 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1616 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1617 if (isa<FPMathOperator>(BinOp))
1618 setFPAttrs(BinOp, FPMathTag, FMF);
1619 return Insert(BinOp, Name);
1620 }
1621
1622 CallInst *CreateConstrainedFPBinOp(
1623 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1624 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1625 Optional<RoundingMode> Rounding = None,
1626 Optional<fp::ExceptionBehavior> Except = None);
1627
1628 Value *CreateNeg(Value *V, const Twine &Name = "",
1629 bool HasNUW = false, bool HasNSW = false) {
1630 if (auto *VC = dyn_cast<Constant>(V))
1631 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1632 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1633 if (HasNUW) BO->setHasNoUnsignedWrap();
1634 if (HasNSW) BO->setHasNoSignedWrap();
1635 return BO;
1636 }
1637
1638 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1639 return CreateNeg(V, Name, false, true);
1640 }
1641
1642 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1643 return CreateNeg(V, Name, true, false);
1644 }
1645
1646 Value *CreateFNeg(Value *V, const Twine &Name = "",
1647 MDNode *FPMathTag = nullptr) {
1648 if (auto *VC = dyn_cast<Constant>(V))
1649 return Insert(Folder.CreateFNeg(VC), Name);
1650 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1651 Name);
1652 }
1653
1654 /// Copy fast-math-flags from an instruction rather than using the builder's
1655 /// default FMF.
1656 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1657 const Twine &Name = "") {
1658 if (auto *VC = dyn_cast<Constant>(V))
1659 return Insert(Folder.CreateFNeg(VC), Name);
1660 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1661 FMFSource->getFastMathFlags()),
1662 Name);
1663 }
1664
1665 Value *CreateNot(Value *V, const Twine &Name = "") {
1666 if (auto *VC = dyn_cast<Constant>(V))
1667 return Insert(Folder.CreateNot(VC), Name);
1668 return Insert(BinaryOperator::CreateNot(V), Name);
1669 }
1670
1671 Value *CreateUnOp(Instruction::UnaryOps Opc,
1672 Value *V, const Twine &Name = "",
1673 MDNode *FPMathTag = nullptr) {
1674 if (auto *VC = dyn_cast<Constant>(V))
1675 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1676 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1677 if (isa<FPMathOperator>(UnOp))
1678 setFPAttrs(UnOp, FPMathTag, FMF);
1679 return Insert(UnOp, Name);
1680 }
1681
1682 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1683 /// Correct number of operands must be passed accordingly.
1684 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1685 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1686
1687 //===--------------------------------------------------------------------===//
1688 // Instruction creation methods: Memory Instructions
1689 //===--------------------------------------------------------------------===//
1690
1691 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1692 Value *ArraySize = nullptr, const Twine &Name = "") {
1693 const DataLayout &DL = BB->getModule()->getDataLayout();
1694 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1695 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1696 }
1697
1698 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1699 const Twine &Name = "") {
1700 const DataLayout &DL = BB->getModule()->getDataLayout();
9. Called C++ object pointer is null
1701 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1702 unsigned AddrSpace = DL.getAllocaAddrSpace();
1703 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1704 }
1705
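The analyzer path ends at the BB->getModule() call above (step 9) because BB, the builder's current insertion block, is null when no insertion point has been set. A hedged sketch of the intended usage, giving the builder a valid insertion point before calling CreateAlloca (the helper name is hypothetical):

static AllocaInst *emitEntryAlloca(Function &F, Type *Ty) {
  // Positioning the builder in the entry block makes BB->getModule() valid.
  IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
  return B.CreateAlloca(Ty, /*ArraySize=*/nullptr, "tmp");
}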
1706 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1707 /// converting the string to 'bool' for the isVolatile parameter.
1708 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1709 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1710 }
1711
1712 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1713 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1714 }
1715
1716 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1717 const Twine &Name = "") {
1718 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1719 }
1720
1721 // Deprecated [opaque pointer types]
1722 LoadInst *CreateLoad(Value *Ptr, const char *Name) {
1723 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1724 }
1725
1726 // Deprecated [opaque pointer types]
1727 LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
1728 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1729 }
1730
1731 // Deprecated [opaque pointer types]
1732 LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
1733 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1734 Name);
1735 }
1736
1737 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1738 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1739 }
1740
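Sketch of the unaligned load/store wrappers (hypothetical helper): passing MaybeAlign() through means the ABI alignment of Ty is used.

static Value *emitCopyThroughTemp(IRBuilder<> &B, Value *SrcPtr, Type *Ty) {
  AllocaInst *Tmp = B.CreateAlloca(Ty);
  Value *V = B.CreateLoad(Ty, SrcPtr, "val");
  B.CreateStore(V, Tmp);
  return B.CreateLoad(Ty, Tmp, "copy");
}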
1741 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
1742 unsigned Align,
1743 const char *Name),
1744 "Use the version that takes NaybeAlign instead") {
1745 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
1746 }
1747 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1748 const char *Name) {
1749 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1750 }
1751
1752 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
1753 unsigned Align,
1754 const Twine &Name = ""),
1755 "Use the version that takes MaybeAlign instead") {
1756 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
1757 }
1758 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1759 const Twine &Name = "") {
1760 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1761 }
1762
1763 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
1764 unsigned Align,
1765 bool isVolatile,
1766 const Twine &Name = ""),
1767 "Use the version that takes MaybeAlign instead") {
1768 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
1769 }
1770 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1771 bool isVolatile, const Twine &Name = "") {
1772 if (!Align) {
1773 const DataLayout &DL = BB->getModule()->getDataLayout();
1774 Align = DL.getABITypeAlign(Ty);
1775 }
1776 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1777 }
1778
1779 // Deprecated [opaque pointer types]
1780 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1781 unsigned Align,
1782 const char *Name),
1783 "Use the version that takes MaybeAlign instead") {
1784 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1785 MaybeAlign(Align), Name);
1786 }
1787 // Deprecated [opaque pointer types]
1788 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1789 unsigned Align,
1790 const Twine &Name = ""),
1791 "Use the version that takes MaybeAlign instead") {
1792 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1793 MaybeAlign(Align), Name);
1794 }
1795 // Deprecated [opaque pointer types]
1796 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1797 unsigned Align,
1798 bool isVolatile,
1799 const Twine &Name = ""),
1800 "Use the version that takes MaybeAlign instead") {
1801 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1802 MaybeAlign(Align), isVolatile, Name);
1803 }
1804 // Deprecated [opaque pointer types]
1805 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
1806 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1807 Align, Name);
1808 }
1809 // Deprecated [opaque pointer types]
1810 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align,
1811 const Twine &Name = "") {
1812 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1813 Align, Name);
1814 }
1815 // Deprecated [opaque pointer types]
1816 LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, bool isVolatile,
1817 const Twine &Name = "") {
1818 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1819 Align, isVolatile, Name);
1820 }
1821
1822 LLVM_ATTRIBUTE_DEPRECATED(
1823 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
1824 bool isVolatile = false),
1825 "Use the version that takes MaybeAlign instead") {
1826 return CreateAlignedStore(Val, Ptr, MaybeAlign(Align), isVolatile);
1827 }
1828 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1829 bool isVolatile = false) {
1830 if (!Align) {
1831 const DataLayout &DL = BB->getModule()->getDataLayout();
1832 Align = DL.getABITypeAlign(Val->getType());
1833 }
1834 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1835 }
1836 FenceInst *CreateFence(AtomicOrdering Ordering,
1837 SyncScope::ID SSID = SyncScope::System,
1838 const Twine &Name = "") {
1839 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1840 }
1841
1842 AtomicCmpXchgInst *CreateAtomicCmpXchg(
1843 Value *Ptr, Value *Cmp, Value *New, AtomicOrdering SuccessOrdering,
1844 AtomicOrdering FailureOrdering, SyncScope::ID SSID = SyncScope::System) {
1845 const DataLayout &DL = BB->getModule()->getDataLayout();
1846 Align Alignment(DL.getTypeStoreSize(New->getType()));
1847 return Insert(new AtomicCmpXchgInst(
1848 Ptr, Cmp, New, Alignment, SuccessOrdering, FailureOrdering, SSID));
1849 }
1850
1851 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
1852 AtomicOrdering Ordering,
1853 SyncScope::ID SSID = SyncScope::System) {
1854 const DataLayout &DL = BB->getModule()->getDataLayout();
1855 Align Alignment(DL.getTypeStoreSize(Val->getType()));
1856 return Insert(new AtomicRMWInst(Op, Ptr, Val, Alignment, Ordering, SSID));
1857 }
1858
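Sketch of the atomic helpers (hypothetical helper; as in the bodies above, the alignment is derived from the store size of the value type):

static AtomicRMWInst *emitAtomicIncrement(IRBuilder<> &B, Value *CounterPtr) {
  // Sequentially consistent fetch-and-add of 1 on an i64 counter.
  LLVMContext &Ctx = B.getContext();
  Value *One = ConstantInt::get(Type::getInt64Ty(Ctx), 1);
  return B.CreateAtomicRMW(AtomicRMWInst::Add, CounterPtr, One,
                           AtomicOrdering::SequentiallyConsistent);
}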
1859 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1860 const Twine &Name = "") {
1861 return CreateGEP(nullptr, Ptr, IdxList, Name);
1862 }
1863
1864 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1865 const Twine &Name = "") {
1866 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1867 // Every index must be constant.
1868 size_t i, e;
1869 for (i = 0, e = IdxList.size(); i != e; ++i)
1870 if (!isa<Constant>(IdxList[i]))
1871 break;
1872 if (i == e)
1873 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1874 }
1875 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1876 }
1877
1878 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1879 const Twine &Name = "") {
1880 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1881 }
1882
1883 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1884 const Twine &Name = "") {
1885 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1886 // Every index must be constant.
1887 size_t i, e;
1888 for (i = 0, e = IdxList.size(); i != e; ++i)
1889 if (!isa<Constant>(IdxList[i]))
1890 break;
1891 if (i == e)
1892 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1893 Name);
1894 }
1895 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1896 }
1897
1898 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1899 return CreateGEP(nullptr, Ptr, Idx, Name);
1900 }
1901
1902 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1903 if (auto *PC = dyn_cast<Constant>(Ptr))
1904 if (auto *IC = dyn_cast<Constant>(Idx))
1905 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1906 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1907 }
1908
1909 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1910 const Twine &Name = "") {
1911 if (auto *PC = dyn_cast<Constant>(Ptr))
1912 if (auto *IC = dyn_cast<Constant>(Idx))
1913 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1914 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1915 }
1916
1917 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1918 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1919 }
1920
1921 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1922 const Twine &Name = "") {
1923 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1924
1925 if (auto *PC = dyn_cast<Constant>(Ptr))
1926 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1927
1928 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1929 }
1930
1931 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1932 const Twine &Name = "") {
1933 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1934
1935 if (auto *PC = dyn_cast<Constant>(Ptr))
1936 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1937
1938 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1939 }
1940
1941 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1942 const Twine &Name = "") {
1943 Value *Idxs[] = {
1944 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1945 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1946 };
1947
1948 if (auto *PC = dyn_cast<Constant>(Ptr))
1949 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1950
1951 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1952 }
1953
1954 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1955 unsigned Idx1, const Twine &Name = "") {
1956 Value *Idxs[] = {
1957 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1958 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1959 };
1960
1961 if (auto *PC = dyn_cast<Constant>(Ptr))
1962 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1963
1964 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1965 }
1966
1967 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1968 const Twine &Name = "") {
1969 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1970
1971 if (auto *PC = dyn_cast<Constant>(Ptr))
1972 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1973
1974 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1975 }
1976
1977 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1978 return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
1979 }
1980
1981 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1982 const Twine &Name = "") {
1983 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1984
1985 if (auto *PC = dyn_cast<Constant>(Ptr))
1986 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1987
1988 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1989 }
1990
1991 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1992 const Twine &Name = "") {
1993 return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
1994 }
1995
1996 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1997 const Twine &Name = "") {
1998 Value *Idxs[] = {
1999 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
2000 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
2001 };
2002
2003 if (auto *PC = dyn_cast<Constant>(Ptr))
2004 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
2005
2006 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
2007 }
2008
2009 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
2010 const Twine &Name = "") {
2011 return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
2012 }
2013
2014 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
2015 uint64_t Idx1, const Twine &Name = "") {
2016 Value *Idxs[] = {
2017 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
2018 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
2019 };
2020
2021 if (auto *PC = dyn_cast<Constant>(Ptr))
2022 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
2023
2024 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
2025 }
2026
2027 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
2028 const Twine &Name = "") {
2029 return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
2030 }
2031
2032 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
2033 const Twine &Name = "") {
2034 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
2035 }
2036
2037 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
2038 return CreateConstInBoundsGEP2_32(nullptr, Ptr, 0, Idx, Name);
2039 }
2040
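Sketch of the struct-field GEP convenience (hypothetical helper): CreateStructGEP is the inbounds {0, Idx} form.

static Value *emitFieldAddress(IRBuilder<> &B, StructType *STy,
                               Value *StructPtr, unsigned FieldIdx) {
  // getelementptr inbounds STy, StructPtr, i32 0, i32 FieldIdx
  return B.CreateStructGEP(STy, StructPtr, FieldIdx, "field.addr");
}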
2041 /// Same as CreateGlobalString, but return a pointer with "i8*" type
2042 /// instead of a pointer to array of i8.
2043 ///
2044 /// If no module is given via \p M, it is taken from the insertion point basic
2045 /// block.
2046 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
2047 unsigned AddressSpace = 0,
2048 Module *M = nullptr) {
2049 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
2050 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2051 Constant *Indices[] = {Zero, Zero};
2052 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
2053 Indices);
2054 }
2055
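Sketch (hypothetical helper): the i8* convenience commonly used for format strings; the module is taken from the insertion block when M is null.

static Constant *emitMessagePtr(IRBuilder<> &B) {
  // Emits a private constant global holding the string and returns an i8*
  // to its first character.
  return B.CreateGlobalStringPtr("hello from IRBuilder\n", "msg");
}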
2056 //===--------------------------------------------------------------------===//
2057 // Instruction creation methods: Cast/Conversion Operators
2058 //===--------------------------------------------------------------------===//
2059
2060 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2061 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2062 }
2063
2064 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2065 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2066 }
2067
2068 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2069 return CreateCast(Instruction::SExt, V, DestTy, Name);
2070 }
2071
2072 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2073 /// the value untouched if the type of V is already DestTy.
2074 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2075 const Twine &Name = "") {
2076 assert(V->getType()->isIntOrIntVectorTy() &&
2077 DestTy->isIntOrIntVectorTy() &&
2078 "Can only zero extend/truncate integers!");
2079 Type *VTy = V->getType();
2080 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2081 return CreateZExt(V, DestTy, Name);
2082 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2083 return CreateTrunc(V, DestTy, Name);
2084 return V;
2085 }
2086
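Sketch of the width-adjusting casts (hypothetical helper): the value is widened, narrowed, or returned unchanged depending on its bit width relative to i32.

static Value *emitToI32(IRBuilder<> &B, Value *V) {
  Type *I32 = Type::getInt32Ty(B.getContext());
  return B.CreateZExtOrTrunc(V, I32, "as.i32");
}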
2087 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2088 /// the value untouched if the type of V is already DestTy.
2089 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2090 const Twine &Name = "") {
2091 assert(V->getType()->isIntOrIntVectorTy() &&
2092 DestTy->isIntOrIntVectorTy() &&
2093 "Can only sign extend/truncate integers!");
2094 Type *VTy = V->getType();
2095 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2096 return CreateSExt(V, DestTy, Name);
2097 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2098 return CreateTrunc(V, DestTy, Name);
2099 return V;
2100 }
2101
2102 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2103 if (IsFPConstrained)
2104 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2105 V, DestTy, nullptr, Name);
2106 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2107 }
2108
2109 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2110 if (IsFPConstrained)
2111 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2112 V, DestTy, nullptr, Name);
2113 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2114 }
2115
2116 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2117 if (IsFPConstrained)
2118 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2119 V, DestTy, nullptr, Name);
2120 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2121 }
2122
2123 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2124 if (IsFPConstrained)
2125 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2126 V, DestTy, nullptr, Name);
2127 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2128 }
2129
2130 Value *CreateFPTrunc(Value *V, Type *DestTy,
2131 const Twine &Name = "") {
2132 if (IsFPConstrained)
2133 return CreateConstrainedFPCast(
2134 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2135 Name);
2136 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2137 }
2138
2139 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2140 if (IsFPConstrained)
2141 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2142 V, DestTy, nullptr, Name);
2143 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2144 }
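// Illustrative sketch (not from IRBuilder.h): the FP conversion helpers above
// switch to constrained intrinsics once strict FP is enabled on the builder,
// assuming an IRBuilder<> B and a double value D:
//   B.setIsFPConstrained(true);                    // subsequent FP ops use constrained intrinsics
//   Value *F = B.CreateFPTrunc(D, B.getFloatTy()); // emits @llvm.experimental.constrained.fptrunc
//                                                  // instead of a plain fptrunc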
2145
2146 Value *CreatePtrToInt(Value *V, Type *DestTy,
2147 const Twine &Name = "") {
2148 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2149 }
2150
2151 Value *CreateIntToPtr(Value *V, Type *DestTy,
2152 const Twine &Name = "") {
2153 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2154 }
2155
2156 Value *CreateBitCast(Value *V, Type *DestTy,
2157 const Twine &Name = "") {
2158 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2159 }
2160
2161 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2162 const Twine &Name = "") {
2163 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2164 }
2165
2166 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2167 const Twine &Name = "") {
2168 if (V->getType() == DestTy)
2169 return V;
2170 if (auto *VC = dyn_cast<Constant>(V))
2171 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2172 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2173 }
2174
2175 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2176 const Twine &Name = "") {
2177 if (V->getType() == DestTy)
2178 return V;
2179 if (auto *VC = dyn_cast<Constant>(V))
2180 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2181 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2182 }
2183
2184 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2185 const Twine &Name = "") {
2186 if (V->getType() == DestTy)
2187 return V;
2188 if (auto *VC = dyn_cast<Constant>(V))
2189 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2190 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2191 }
2192
2193 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2194 const Twine &Name = "") {
2195 if (V->getType() == DestTy)
2196 return V;
2197 if (auto *VC = dyn_cast<Constant>(V))
2198 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2199 return Insert(CastInst::Create(Op, V, DestTy), Name);
2200 }
2201
2202 Value *CreatePointerCast(Value *V, Type *DestTy,
2203 const Twine &Name = "") {
2204 if (V->getType() == DestTy)
2205 return V;
2206 if (auto *VC = dyn_cast<Constant>(V))
2207 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2208 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2209 }
2210
2211 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2212 const Twine &Name = "") {
2213 if (V->getType() == DestTy)
2214 return V;
2215
2216 if (auto *VC = dyn_cast<Constant>(V)) {
2217 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2218 Name);
2219 }
2220
2221 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2222 Name);
2223 }
2224
2225 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2226 const Twine &Name = "") {
2227 if (V->getType() == DestTy)
2228 return V;
2229 if (auto *VC = dyn_cast<Constant>(V))
2230 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2231 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2232 }
2233
2234 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2235 const Twine &Name = "") {
2236 if (V->getType() == DestTy)
2237 return V;
2238 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2239 return CreatePtrToInt(V, DestTy, Name);
2240 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2241 return CreateIntToPtr(V, DestTy, Name);
2242
2243 return CreateBitCast(V, DestTy, Name);
2244 }
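// Illustrative sketch (not from IRBuilder.h): CreateBitOrPointerCast picks
// ptrtoint/inttoptr when exactly one side is a pointer and a bitcast otherwise,
// assuming an IRBuilder<> B, a pointer value P, and the module's DataLayout DL:
//   Type  *IntPtrTy = DL.getIntPtrType(P->getType());
//   Value *AsInt    = B.CreateBitOrPointerCast(P, IntPtrTy);         // ptrtoint
//   Value *AsPtr    = B.CreateBitOrPointerCast(AsInt, P->getType()); // inttoptr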
2245
2246 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2247 if (V->getType() == DestTy)
2248 return V;
2249 if (auto *VC = dyn_cast<Constant>(V))
2250 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2251 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2252 }
2253
2254 CallInst *CreateConstrainedFPCast(
2255 Intrinsic::ID ID, Value *V, Type *DestTy,
2256 Instruction *FMFSource = nullptr, const Twine &Name = "",
2257 MDNode *FPMathTag = nullptr,
2258 Optional<RoundingMode> Rounding = None,
2259 Optional<fp::ExceptionBehavior> Except = None);
2260
2261 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2262 // compile time error, instead of converting the string to bool for the
2263 // isSigned parameter.
2264 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2265
2266 //===--------------------------------------------------------------------===//
2267 // Instruction creation methods: Compare Instructions
2268 //===--------------------------------------------------------------------===//
2269
2270 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2271 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2272 }
2273
2274 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2275 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2276 }
2277
2278 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2279 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2280 }
2281
2282 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2283 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2284 }
2285
2286 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2287 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2288 }
2289
2290 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2291 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2292 }
2293
2294 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2295 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2296 }
2297
2298 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2299 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2300 }
2301
2302 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2303 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2304 }
2305
2306 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2307 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2308 }
2309
2310 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2311 MDNode *FPMathTag = nullptr) {
2312 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2313 }
2314
2315 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2316 MDNode *FPMathTag = nullptr) {
2317 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2318 }
2319
2320 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2321 MDNode *FPMathTag = nullptr) {
2322 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2323 }
2324
2325 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2326 MDNode *FPMathTag = nullptr) {
2327 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2328 }
2329
2330 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2331 MDNode *FPMathTag = nullptr) {
2332 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2333 }
2334
2335 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2336 MDNode *FPMathTag = nullptr) {
2337 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2338 }
2339
2340 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2341 MDNode *FPMathTag = nullptr) {
2342 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2343 }
2344
2345 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2346 MDNode *FPMathTag = nullptr) {
2347 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2348 }
2349
2350 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2351 MDNode *FPMathTag = nullptr) {
2352 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2353 }
2354
2355 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2356 MDNode *FPMathTag = nullptr) {
2357 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2358 }
2359
2360 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2361 MDNode *FPMathTag = nullptr) {
2362 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2363 }
2364
2365 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2366 MDNode *FPMathTag = nullptr) {
2367 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2368 }
2369
2370 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2371 MDNode *FPMathTag = nullptr) {
2372 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2373 }
2374
2375 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2376 MDNode *FPMathTag = nullptr) {
2377 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2378 }
2379
2380 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2381 const Twine &Name = "") {
2382 if (auto *LC = dyn_cast<Constant>(LHS))
2383 if (auto *RC = dyn_cast<Constant>(RHS))
2384 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2385 return Insert(new ICmpInst(P, LHS, RHS), Name);
2386 }
2387
2388 // Create a quiet floating-point comparison (i.e. one that raises an FP
2389 // exception only in the case where an input is a signaling NaN).
2390 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2391 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2392 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2393 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2394 }
2395
2396 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2397 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2398 return CmpInst::isFPPredicate(Pred)
2399 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2400 : CreateICmp(Pred, LHS, RHS, Name);
2401 }
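// Illustrative sketch (not from IRBuilder.h): CreateCmp dispatches on the
// predicate kind, so one call site can handle both integer and FP operands,
// assuming an IRBuilder<> B with integer values A, Bv and double values X, Y:
//   Value *IntLT = B.CreateCmp(ICmpInst::ICMP_SLT, A, Bv); // lowers to CreateICmp
//   Value *FpLT  = B.CreateCmp(FCmpInst::FCMP_OLT, X, Y);  // lowers to CreateFCmp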
2402
2403 // Create a signaling floating-point comparison (i.e. one that raises an FP
2404 // exception whenever an input is any NaN, signaling or quiet).
2405 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2406 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2407 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2408 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2409 }
2410
2411private:
2412 // Helper routine to create either a signaling or a quiet FP comparison.
2413 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2414 const Twine &Name, MDNode *FPMathTag,
2415 bool IsSignaling);
2416
2417public:
2418 CallInst *CreateConstrainedFPCmp(
2419 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2420 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2421
2422 //===--------------------------------------------------------------------===//
2423 // Instruction creation methods: Other Instructions
2424 //===--------------------------------------------------------------------===//
2425
2426 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2427 const Twine &Name = "") {
2428 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2429 if (isa<FPMathOperator>(Phi))
2430 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2431 return Insert(Phi, Name);
2432 }
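// Illustrative sketch (not from IRBuilder.h): CreatePHI only reserves operand
// slots; incoming values are attached afterwards. Assuming an IRBuilder<> B
// positioned in a join block with predecessor blocks ThenBB and ElseBB and
// values ThenVal and ElseVal defined in them:
//   PHINode *Phi = B.CreatePHI(B.getInt32Ty(), /*NumReservedValues=*/2, "merge");
//   Phi->addIncoming(ThenVal, ThenBB);
//   Phi->addIncoming(ElseVal, ElseBB);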
2433
2434 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2435 ArrayRef<Value *> Args = None, const Twine &Name = "",
2436 MDNode *FPMathTag = nullptr) {
2437 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2438 if (IsFPConstrained)
2439 setConstrainedFPCallAttr(CI);
2440 if (isa<FPMathOperator>(CI))
2441 setFPAttrs(CI, FPMathTag, FMF);
2442 return Insert(CI, Name);
2443 }
2444
2445 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2446 ArrayRef<OperandBundleDef> OpBundles,
2447 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2448 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2449 if (IsFPConstrained)
2450 setConstrainedFPCallAttr(CI);
2451 if (isa<FPMathOperator>(CI))
2452 setFPAttrs(CI, FPMathTag, FMF);
2453 return Insert(CI, Name);
2454 }
2455
2456 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2457 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2458 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2459 FPMathTag);
2460 }
2461
2462 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2463 ArrayRef<OperandBundleDef> OpBundles,
2464 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2465 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2466 OpBundles, Name, FPMathTag);
2467 }
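// Illustrative sketch (not from IRBuilder.h): the FunctionCallee overloads
// forward the callee's function type, so a declaration obtained from the module
// can be called directly. Assuming an IRBuilder<> B, a Module M, an i32 value
// Arg, and a hypothetical callee name:
//   FunctionCallee Callee = M.getOrInsertFunction(
//       "hypothetical_callee", B.getInt32Ty(), B.getInt32Ty());
//   CallInst *CI = B.CreateCall(Callee, {Arg});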
2468
2469 CallInst *CreateConstrainedFPCall(
2470 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2471 Optional<RoundingMode> Rounding = None,
2472 Optional<fp::ExceptionBehavior> Except = None);
2473
2474 Value *CreateSelect(Value *C, Value *True, Value *False,
2475 const Twine &Name = "", Instruction *MDFrom = nullptr);
2476
2477 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2478 return Insert(new VAArgInst(List, Ty), Name);
2479 }
2480
2481 Value *CreateExtractElement(Value *Vec, Value *Idx,
2482 const Twine &Name = "") {
2483 if (auto *VC = dyn_cast<Constant>(Vec))
2484 if (auto *IC = dyn_cast<Constant>(Idx))
2485 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2486 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2487 }
2488
2489 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2490 const Twine &Name = "") {
2491 return CreateExtractElement(Vec, getInt64(Idx), Name);
2492 }
2493
2494 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2495 const Twine &Name = "") {
2496 if (auto *VC = dyn_cast<Constant>(Vec))
2497 if (auto *NC = dyn_cast<Constant>(NewElt))
2498 if (auto *IC = dyn_cast<Constant>(Idx))
2499 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2500 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2501 }
2502
2503 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2504 const Twine &Name = "") {
2505 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2506 }
2507
2508 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2509 const Twine &Name = "") {
2510 SmallVector<int, 16> IntMask;
2511 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2512 return CreateShuffleVector(V1, V2, IntMask, Name);
2513 }
2514
2515   LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2516                                                        ArrayRef<uint32_t> Mask,
2517                                                        const Twine &Name = ""),
2518                             "Pass indices as 'int' instead") {
2519 SmallVector<int, 16> IntMask;
2520 IntMask.assign(Mask.begin(), Mask.end());
2521 return CreateShuffleVector(V1, V2, IntMask, Name);
2522 }
2523
2524 /// See class ShuffleVectorInst for a description of the mask representation.
2525 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2526 const Twine &Name = "") {
2527 if (auto *V1C = dyn_cast<Constant>(V1))
2528 if (auto *V2C = dyn_cast<Constant>(V2))
2529 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2530 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2531 }
2532
2533 /// Create a unary shuffle. The second vector operand of the IR instruction
2534 /// is poison.
2535 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2536 const Twine &Name = "") {
2537 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2538 }
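// Illustrative sketch (not from IRBuilder.h): the unary overload above shuffles
// a single vector against poison; broadcasting lane 0 of a <4 x i32> value Vec
// could look like this, assuming an IRBuilder<> B:
//   Value *Splat = B.CreateShuffleVector(Vec, {0, 0, 0, 0}, "splat");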
2539
2540 Value *CreateExtractValue(Value *Agg,
2541 ArrayRef<unsigned> Idxs,
2542 const Twine &Name = "") {
2543 if (auto *AggC = dyn_cast<Constant>(Agg))
2544 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2545 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2546 }
2547
2548 Value *CreateInsertValue(Value *Agg, Value *Val,
2549 ArrayRef<unsigned> Idxs,
2550 const Twine &Name = "") {
2551 if (auto *AggC = dyn_cast<Constant>(Agg))
2552 if (auto *ValC = dyn_cast<Constant>(Val))
2553 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2554 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2555 }
2556
2557 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2558 const Twine &Name = "") {
2559 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2560 }
2561
2562 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2563 return Insert(new FreezeInst(V), Name);
2564 }
2565
2566 //===--------------------------------------------------------------------===//
2567 // Utility creation methods
2568 //===--------------------------------------------------------------------===//
2569
2570 /// Return an i1 value testing if \p Arg is null.
2571 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2572 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2573 Name);
2574 }
2575
2576 /// Return an i1 value testing if \p Arg is not null.
2577 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2578 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2579 Name);
2580 }
2581
2582 /// Return the i64 difference between two pointer values, dividing out
2583 /// the size of the pointed-to objects.
2584 ///
2585 /// This is intended to implement C-style pointer subtraction. As such, the
2586 /// pointers must be appropriately aligned for their element types and
2587 /// pointing into the same object.
2588 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
2589
2590 /// Create a launder.invariant.group intrinsic call. If Ptr type is
2591 /// different from pointer to i8, it's casted to pointer to i8 in the same
2592 /// address space before call and casted back to Ptr type after call.
2593 Value *CreateLaunderInvariantGroup(Value *Ptr);
2594
2595 /// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
2596 /// different from pointer to i8, it's casted to pointer to i8 in the same
2597 /// address space before call and casted back to Ptr type after call.
2598 Value *CreateStripInvariantGroup(Value *Ptr);
2599
2600 /// Return a vector value that contains \arg V broadcasted to \p
2601 /// NumElts elements.
2602 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2603
2604 /// Return a vector value that contains \arg V broadcasted to \p
2605 /// EC elements.
2606 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2607
2608 /// Return a value that has been extracted from a larger integer type.
2609 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2610 IntegerType *ExtractedTy, uint64_t Offset,
2611 const Twine &Name);
2612
2613 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2614 unsigned Dimension, unsigned LastIndex,
2615 MDNode *DbgInfo);
2616
2617 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2618 MDNode *DbgInfo);
2619
2620 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2621 unsigned Index, unsigned FieldIndex,
2622 MDNode *DbgInfo);
2623
2624private:
2625 /// Helper function that creates an assume intrinsic call that
2626 /// represents an alignment assumption on the provided pointer \p PtrValue
2627 /// with offset \p OffsetValue and alignment value \p AlignValue.
2628 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2629 Value *PtrValue, Value *AlignValue,
2630 Value *OffsetValue);
2631
2632public:
2633 /// Create an assume intrinsic call that represents an alignment
2634 /// assumption on the provided pointer.
2635 ///
2636 /// An optional offset can be provided, and if it is provided, the offset
2637 /// must be subtracted from the provided pointer to get the pointer with the
2638 /// specified alignment.
2639 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2640 unsigned Alignment,
2641 Value *OffsetValue = nullptr);
2642
2643 /// Create an assume intrinsic call that represents an alignment
2644 /// assumption on the provided pointer.
2645 ///
2646 /// An optional offset can be provided, and if it is provided, the offset
2647 /// must be subtracted from the provided pointer to get the pointer with the
2648 /// specified alignment.
2649 ///
2650 /// This overload handles the condition where the Alignment is dependent
2651 /// on an existing value rather than a static value.
2652 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2653 Value *Alignment,
2654 Value *OffsetValue = nullptr);
2655};
2656
2657/// This provides a uniform API for creating instructions and inserting
2658/// them into a basic block: either at the end of a BasicBlock, or at a specific
2659/// iterator location in a block.
2660///
2661/// Note that the builder does not expose the full generality of LLVM
2662/// instructions. For access to extra instruction properties, use the mutators
2663/// (e.g. setVolatile) on the instructions after they have been
2664/// created. Convenience state exists to specify fast-math flags and fp-math
2665/// tags.
2666///
2667/// The first template argument specifies a class to use for creating constants.
2668/// This defaults to creating minimally folded constants. The second template
2669/// argument allows clients to specify custom insertion hooks that are called on
2670/// every newly created insertion.
2671template <typename FolderTy = ConstantFolder,
2672 typename InserterTy = IRBuilderDefaultInserter>
2673class IRBuilder : public IRBuilderBase {
2674private:
2675 FolderTy Folder;
2676 InserterTy Inserter;
2677
2678public:
2679 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2680 MDNode *FPMathTag = nullptr,
2681 ArrayRef<OperandBundleDef> OpBundles = None)
2682 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2683 Folder(Folder), Inserter(Inserter) {}
2684
2685 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2686 ArrayRef<OperandBundleDef> OpBundles = None)
2687 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2688
2689 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2690 MDNode *FPMathTag = nullptr,
2691 ArrayRef<OperandBundleDef> OpBundles = None)
2692 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2693 FPMathTag, OpBundles), Folder(Folder) {
2694 SetInsertPoint(TheBB);
2695 }
2696
2697 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2698 ArrayRef<OperandBundleDef> OpBundles = None)
2699 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2700 FPMathTag, OpBundles) {
2701 SetInsertPoint(TheBB);
2702 }
2703
2704 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2705 ArrayRef<OperandBundleDef> OpBundles = None)
2706 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2707 FPMathTag, OpBundles) {
2708 SetInsertPoint(IP);
2709 }
2710
2711 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2712 MDNode *FPMathTag = nullptr,
2713 ArrayRef<OperandBundleDef> OpBundles = None)
2714 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2715 FPMathTag, OpBundles), Folder(Folder) {
2716 SetInsertPoint(TheBB, IP);
2717 }
2718
2719 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2720 MDNode *FPMathTag = nullptr,
2721 ArrayRef<OperandBundleDef> OpBundles = None)
2722 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2723 FPMathTag, OpBundles) {
2724 SetInsertPoint(TheBB, IP);
2725 }
2726
2727 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2728 /// or FastMathFlagGuard instead.
2729 IRBuilder(const IRBuilder &) = delete;
2730
2731 InserterTy &getInserter() { return Inserter; }
2732};
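// Illustrative sketch (not from IRBuilder.h): a typical client constructs the
// builder positioned at an existing block or instruction and then emits IR.
// Assuming a Function *F with an entry block:
//   IRBuilder<> B(&F->getEntryBlock());  // insert at the end of the entry block
//   Value *Sum = B.CreateAdd(B.getInt32(1), B.getInt32(2), "sum");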
2733
2734// Create wrappers for C Binding types (see CBindingWrapping.h).
2735 DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2736
2737} // end namespace llvm
2738
2739#endif // LLVM_IR_IRBUILDER_H