Bug Summary

File: llvm/include/llvm/IR/IRBuilder.h
Warning: line 1712, column 30
Called C++ object pointer is null
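
This diagnostic is produced by the analyzer's core checkers when, along a path it considers feasible, a member function is invoked through an object pointer that may be null at the call site. The annotated listing below shows the path the analyzer assumed through OpenMPIRBuilder::createParallel (steps 1-36) before reaching the flagged call inside IRBuilder.h, line 1712. As a minimal illustrative sketch of this defect class only (hypothetical Widget type and lookup() helper, not code taken from this report), the pattern looks like:

    #include <iostream>

    struct Widget {
      void frob() { std::cout << "frob\n"; }
    };

    // Hypothetical helper that may return nullptr, similar to how the analyzer
    // treats a dyn_cast<> result it cannot prove to be non-null.
    Widget *lookup(bool Found) {
      static Widget W;
      return Found ? &W : nullptr;
    }

    int main() {
      Widget *P = lookup(/*Found=*/false); // analyzer assumes P may be null here
      P->frob(); // member call through a possibly-null pointer; this is the
                 // situation reported as "Called C++ object pointer is null"
      return 0;
    }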

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name OMPIRBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Frontend/OpenMP -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Frontend/OpenMP -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-26-235520-9401-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
16
17#include "llvm/ADT/StringRef.h"
18#include "llvm/ADT/Triple.h"
19#include "llvm/IR/CFG.h"
20#include "llvm/IR/DebugInfo.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/MDBuilder.h"
23#include "llvm/IR/Value.h"
24#include "llvm/Support/CommandLine.h"
25#include "llvm/Support/Error.h"
26#include "llvm/Transforms/Utils/BasicBlockUtils.h"
27#include "llvm/Transforms/Utils/CodeExtractor.h"
28
29#include <sstream>
30
31#define DEBUG_TYPE "openmp-ir-builder"
32
33using namespace llvm;
34using namespace omp;
35
36static cl::opt<bool>
37 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
38 cl::desc("Use optimistic attributes describing "
39 "'as-if' properties of runtime calls."),
40 cl::init(false));
41
42void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
43 LLVMContext &Ctx = Fn.getContext();
44
45 // Get the function's current attributes.
46 auto Attrs = Fn.getAttributes();
47 auto FnAttrs = Attrs.getFnAttributes();
48 auto RetAttrs = Attrs.getRetAttributes();
49 SmallVector<AttributeSet, 4> ArgAttrs;
50 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
51 ArgAttrs.emplace_back(Attrs.getParamAttributes(ArgNo));
52
53#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
54#include "llvm/Frontend/OpenMP/OMPKinds.def"
55
56 // Add attributes to the function declaration.
57 switch (FnID) {
58#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
59 case Enum: \
60 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \
61 RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \
62 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \
63 ArgAttrs[ArgNo] = \
64 ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \
65 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \
66 break;
67#include "llvm/Frontend/OpenMP/OMPKinds.def"
68 default:
69 // Attributes are optional.
70 break;
71 }
72}
73
74FunctionCallee
75OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
76 FunctionType *FnTy = nullptr;
77 Function *Fn = nullptr;
78
79 // Try to find the declaration in the module first.
80 switch (FnID) {
81#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
82 case Enum: \
83 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
84 IsVarArg); \
85 Fn = M.getFunction(Str); \
86 break;
87#include "llvm/Frontend/OpenMP/OMPKinds.def"
88 }
89
90 if (!Fn) {
91 // Create a new declaration if we need one.
92 switch (FnID) {
93#define OMP_RTL(Enum, Str, ...) \
94 case Enum: \
95 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
96 break;
97#include "llvm/Frontend/OpenMP/OMPKinds.def"
98 }
99
100 // Add information if the runtime function takes a callback function
101 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
102 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
103 LLVMContext &Ctx = Fn->getContext();
104 MDBuilder MDB(Ctx);
105 // Annotate the callback behavior of the runtime function:
106 // - The callback callee is argument number 2 (microtask).
107 // - The first two arguments of the callback callee are unknown (-1).
108 // - All variadic arguments to the runtime function are passed to the
109 // callback callee.
110 Fn->addMetadata(
111 LLVMContext::MD_callback,
112 *MDNode::get(Ctx, {MDB.createCallbackEncoding(
113 2, {-1, -1}, /* VarArgsArePassed */ true)}));
114 }
115 }
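For context, the callback encoding created above corresponds to the !callback metadata described in the LLVM LangRef. Attached to __kmpc_fork_call it would take roughly the following shape in textual IR (a sketch of the expected form, not output captured from this build):

    declare !callback !0 void @__kmpc_fork_call(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...)

    !0 = !{!1}
    !1 = !{i64 2, i64 -1, i64 -1, i1 true} ; callee is argument 2, its first two
                                           ; parameters are unknown (-1), and the
                                           ; fork call's varargs are forwarded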
116
117 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
118 << " with type " << *Fn->getFunctionType() << "\n");
119 addAttributes(FnID, *Fn);
120
121 } else {
122 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
123 << " with type " << *Fn->getFunctionType() << "\n");
124 }
125
126 assert(Fn && "Failed to create OpenMP runtime function");
127
128 // Cast the function to the expected type if necessary
129 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
130 return {FnTy, C};
131}
132
133Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
134 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
135 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
136 assert(Fn && "Failed to create OpenMP runtime function pointer");
137 return Fn;
138}
139
140void OpenMPIRBuilder::initialize() { initializeTypes(M); }
141
142void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
143 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
144 SmallVector<BasicBlock *, 32> Blocks;
145 SmallVector<OutlineInfo, 16> DeferredOutlines;
146 for (OutlineInfo &OI : OutlineInfos) {
147 // Skip functions that have not finalized yet; may happen with nested
148 // function generation.
149 if (Fn && OI.getFunction() != Fn) {
150 DeferredOutlines.push_back(OI);
151 continue;
152 }
153
154 ParallelRegionBlockSet.clear();
155 Blocks.clear();
156 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
157
158 Function *OuterFn = OI.getFunction();
159 CodeExtractorAnalysisCache CEAC(*OuterFn);
160 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
161 /* AggregateArgs */ false,
162 /* BlockFrequencyInfo */ nullptr,
163 /* BranchProbabilityInfo */ nullptr,
164 /* AssumptionCache */ nullptr,
165 /* AllowVarArgs */ true,
166 /* AllowAlloca */ true,
167 /* Suffix */ ".omp_par");
168
169 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
170 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
171 << " Exit: " << OI.ExitBB->getName() << "\n");
172 assert(Extractor.isEligible() &&
173 "Expected OpenMP outlining to be possible!");
174
175 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
176
177 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
178 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
179 assert(OutlinedFn->getReturnType()->isVoidTy() &&
180 "OpenMP outlined functions should not return a value!");
181
182 // For compatibility with the clang CG we move the outlined function after the
183 // one with the parallel region.
184 OutlinedFn->removeFromParent();
185 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
186
187 // Remove the artificial entry introduced by the extractor right away, we
188 // made our own entry block after all.
189 {
190 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
191 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
192 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
193 if (AllowExtractorSinking) {
194 // Move instructions from the to-be-deleted ArtificialEntry to the entry
195 // basic block of the parallel region. CodeExtractor may have sunk
196 // allocas/bitcasts for values that are solely used in the outlined
197 // region and do not escape.
198 assert(!ArtificialEntry.empty() &&
199 "Expected instructions to sink in the outlined region");
200 for (BasicBlock::iterator It = ArtificialEntry.begin(),
201 End = ArtificialEntry.end();
202 It != End;) {
203 Instruction &I = *It;
204 It++;
205
206 if (I.isTerminator())
207 continue;
208
209 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
210 }
211 }
212 OI.EntryBB->moveBefore(&ArtificialEntry);
213 ArtificialEntry.eraseFromParent();
214 }
215 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
216 assert(OutlinedFn && OutlinedFn->getNumUses() == 1);
217
218 // Run a user callback, e.g. to add attributes.
219 if (OI.PostOutlineCB)
220 OI.PostOutlineCB(*OutlinedFn);
221 }
222
223 // Remove work items that have been completed.
224 OutlineInfos = std::move(DeferredOutlines);
225}
226
227OpenMPIRBuilder::~OpenMPIRBuilder() {
228 assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
229}
230
231Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
232 IdentFlag LocFlags,
233 unsigned Reserve2Flags) {
234 // Enable "C-mode".
235 LocFlags |= OMP_IDENT_FLAG_KMPC;
236
237 Value *&Ident =
238 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
239 if (!Ident) {
240 Constant *I32Null = ConstantInt::getNullValue(Int32);
241 Constant *IdentData[] = {
242 I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
243 ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
244 Constant *Initializer = ConstantStruct::get(
245 cast<StructType>(IdentPtr->getPointerElementType()), IdentData);
246
247 // Look for existing encoding of the location + flags, not needed but
248 // minimizes the difference to the existing solution while we transition.
249 for (GlobalVariable &GV : M.getGlobalList())
250 if (GV.getType() == IdentPtr && GV.hasInitializer())
251 if (GV.getInitializer() == Initializer)
252 return Ident = &GV;
253
254 auto *GV = new GlobalVariable(M, IdentPtr->getPointerElementType(),
255 /* isConstant = */ true,
256 GlobalValue::PrivateLinkage, Initializer);
257 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
258 GV->setAlignment(Align(8));
259 Ident = GV;
260 }
261 return Builder.CreatePointerCast(Ident, IdentPtr);
262}
263
264Type *OpenMPIRBuilder::getLanemaskType() {
265 LLVMContext &Ctx = M.getContext();
266 Triple triple(M.getTargetTriple());
267
268 // This test is adequate until deviceRTL has finer grained lane widths
269 return triple.isAMDGCN() ? Type::getInt64Ty(Ctx) : Type::getInt32Ty(Ctx);
270}
271
272Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
273 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
274 if (!SrcLocStr) {
275 Constant *Initializer =
276 ConstantDataArray::getString(M.getContext(), LocStr);
277
278 // Look for existing encoding of the location, not needed but minimizes the
279 // difference to the existing solution while we transition.
280 for (GlobalVariable &GV : M.getGlobalList())
281 if (GV.isConstant() && GV.hasInitializer() &&
282 GV.getInitializer() == Initializer)
283 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
284
285 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
286 /* AddressSpace */ 0, &M);
287 }
288 return SrcLocStr;
289}
290
291Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
292 StringRef FileName,
293 unsigned Line,
294 unsigned Column) {
295 SmallString<128> Buffer;
296 Buffer.push_back(';');
297 Buffer.append(FileName);
298 Buffer.push_back(';');
299 Buffer.append(FunctionName);
300 Buffer.push_back(';');
301 Buffer.append(std::to_string(Line));
302 Buffer.push_back(';');
303 Buffer.append(std::to_string(Column));
304 Buffer.push_back(';');
305 Buffer.push_back(';');
306 return getOrCreateSrcLocStr(Buffer.str());
307}
308
309Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
310 return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
311}
312
313Constant *
314OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
315 DILocation *DIL = Loc.DL.get();
316 if (!DIL)
317 return getOrCreateDefaultSrcLocStr();
318 StringRef FileName = M.getName();
319 if (DIFile *DIF = DIL->getFile())
320 if (Optional<StringRef> Source = DIF->getSource())
321 FileName = *Source;
322 StringRef Function = DIL->getScope()->getSubprogram()->getName();
323 Function =
324 !Function.empty() ? Function : Loc.IP.getBlock()->getParent()->getName();
325 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
326 DIL->getColumn());
327}
328
329Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
330 return Builder.CreateCall(
331 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
332 "omp_global_thread_num");
333}
334
335OpenMPIRBuilder::InsertPointTy
336OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
337 bool ForceSimpleCall, bool CheckCancelFlag) {
338 if (!updateToLocation(Loc))
339 return Loc.IP;
340 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
341}
342
343OpenMPIRBuilder::InsertPointTy
344OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
345 bool ForceSimpleCall, bool CheckCancelFlag) {
346 // Build call __kmpc_cancel_barrier(loc, thread_id) or
347 // __kmpc_barrier(loc, thread_id);
348
349 IdentFlag BarrierLocFlags;
350 switch (Kind) {
351 case OMPD_for:
352 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
353 break;
354 case OMPD_sections:
355 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
356 break;
357 case OMPD_single:
358 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
359 break;
360 case OMPD_barrier:
361 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
362 break;
363 default:
364 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
365 break;
366 }
367
368 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
369 Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags),
370 getOrCreateThreadID(getOrCreateIdent(SrcLocStr))};
371
372 // If we are in a cancellable parallel region, barriers are cancellation
373 // points.
374 // TODO: Check why we would force simple calls or to ignore the cancel flag.
375 bool UseCancelBarrier =
376 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
377
378 Value *Result =
379 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
380 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
381 : OMPRTL___kmpc_barrier),
382 Args);
383
384 if (UseCancelBarrier && CheckCancelFlag)
385 emitCancelationCheckImpl(Result, OMPD_parallel);
386
387 return Builder.saveIP();
388}
389
390OpenMPIRBuilder::InsertPointTy
391OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
392 Value *IfCondition,
393 omp::Directive CanceledDirective) {
394 if (!updateToLocation(Loc))
395 return Loc.IP;
396
397 // LLVM utilities like blocks with terminators.
398 auto *UI = Builder.CreateUnreachable();
399
400 Instruction *ThenTI = UI, *ElseTI = nullptr;
401 if (IfCondition)
402 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
403 Builder.SetInsertPoint(ThenTI);
404
405 Value *CancelKind = nullptr;
406 switch (CanceledDirective) {
407#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
408 case DirectiveEnum: \
409 CancelKind = Builder.getInt32(Value); \
410 break;
411#include "llvm/Frontend/OpenMP/OMPKinds.def"
412 default:
413 llvm_unreachable("Unknown cancel kind!");
414 }
415
416 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
417 Value *Ident = getOrCreateIdent(SrcLocStr);
418 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
419 Value *Result = Builder.CreateCall(
420 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
421 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
422 if (CanceledDirective == OMPD_parallel) {
423 IRBuilder<>::InsertPointGuard IPG(Builder);
424 Builder.restoreIP(IP);
425 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
426 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
427 /* CheckCancelFlag */ false);
428 }
429 };
430
431 // The actual cancel logic is shared with others, e.g., cancel_barriers.
432 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);
433
434 // Update the insertion point and remove the terminator we introduced.
435 Builder.SetInsertPoint(UI->getParent());
436 UI->eraseFromParent();
437
438 return Builder.saveIP();
439}
440
441void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
442 omp::Directive CanceledDirective,
443 FinalizeCallbackTy ExitCB) {
444 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
445 "Unexpected cancellation!");
446
447 // For a cancel barrier we create two new blocks.
448 BasicBlock *BB = Builder.GetInsertBlock();
449 BasicBlock *NonCancellationBlock;
450 if (Builder.GetInsertPoint() == BB->end()) {
451 // TODO: This branch will not be needed once we moved to the
452 // OpenMPIRBuilder codegen completely.
453 NonCancellationBlock = BasicBlock::Create(
454 BB->getContext(), BB->getName() + ".cont", BB->getParent());
455 } else {
456 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
457 BB->getTerminator()->eraseFromParent();
458 Builder.SetInsertPoint(BB);
459 }
460 BasicBlock *CancellationBlock = BasicBlock::Create(
461 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
462
463 // Jump to them based on the return value.
464 Value *Cmp = Builder.CreateIsNull(CancelFlag);
465 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
466 /* TODO weight */ nullptr, nullptr);
467
468 // From the cancellation block we finalize all variables and go to the
469 // post finalization block that is known to the FiniCB callback.
470 Builder.SetInsertPoint(CancellationBlock);
471 if (ExitCB)
472 ExitCB(Builder.saveIP());
473 auto &FI = FinalizationStack.back();
474 FI.FiniCB(Builder.saveIP());
475
476 // The continuation block is where code generation continues.
477 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
478}
479
480IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
481 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
482 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
483 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
484 omp::ProcBindKind ProcBind, bool IsCancellable) {
485 if (!updateToLocation(Loc))
1
Taking false branch
486 return Loc.IP;
487
488 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
489 Value *Ident = getOrCreateIdent(SrcLocStr);
490 Value *ThreadID = getOrCreateThreadID(Ident);
491
492 if (NumThreads) {
2
Assuming 'NumThreads' is null
3
Taking false branch
493 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
494 Value *Args[] = {
495 Ident, ThreadID,
496 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
497 Builder.CreateCall(
498 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
499 }
500
501 if (ProcBind != OMP_PROC_BIND_default) {
4
Assuming 'ProcBind' is equal to 'OMP_PROC_BIND_default'
5
Taking false branch
502 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
503 Value *Args[] = {
504 Ident, ThreadID,
505 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
506 Builder.CreateCall(
507 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
508 }
509
510 BasicBlock *InsertBB = Builder.GetInsertBlock();
511 Function *OuterFn = InsertBB->getParent();
512
513 // Save the outer alloca block because the insertion iterator may get
514 // invalidated and we still need this later.
515 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
516
517 // Vector to remember instructions we used only during the modeling but which
518 // we want to delete at the end.
519 SmallVector<Instruction *, 4> ToBeDeleted;
520
521 // Change the location to the outer alloca insertion point to create and
522 // initialize the allocas we pass into the parallel region.
523 Builder.restoreIP(OuterAllocaIP);
524 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
525 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");
526
527 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the
528 // program, otherwise we only need them for modeling purposes to get the
529 // associated arguments in the outlined function. In the former case,
530 // initialize the allocas properly, in the latter case, delete them later.
531 if (IfCondition) {
6
Assuming 'IfCondition' is null
7
Taking false branch
532 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
533 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
534 } else {
535 ToBeDeleted.push_back(TIDAddr);
536 ToBeDeleted.push_back(ZeroAddr);
537 }
538
539 // Create an artificial insertion point that will also ensure the blocks we
540 // are about to split are not degenerated.
541 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
542
543 Instruction *ThenTI = UI, *ElseTI = nullptr;
544 if (IfCondition)
7.1
'IfCondition' is null
8
Taking false branch
545 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
546
547 BasicBlock *ThenBB = ThenTI->getParent();
548 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
549 BasicBlock *PRegBodyBB =
550 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
551 BasicBlock *PRegPreFiniBB =
552 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
553 BasicBlock *PRegExitBB =
554 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");
555
556 auto FiniCBWrapper = [&](InsertPointTy IP) {
557 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
558 // target to the region exit block.
559 if (IP.getBlock()->end() == IP.getPoint()) {
560 IRBuilder<>::InsertPointGuard IPG(Builder);
561 Builder.restoreIP(IP);
562 Instruction *I = Builder.CreateBr(PRegExitBB);
563 IP = InsertPointTy(I->getParent(), I->getIterator());
564 }
565 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
566 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
567 "Unexpected insertion point for finalization call!");
568 return FiniCB(IP);
569 };
570
571 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
572
573 // Generate the privatization allocas in the block that will become the entry
574 // of the outlined function.
575 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
576 InsertPointTy InnerAllocaIP = Builder.saveIP();
577
578 AllocaInst *PrivTIDAddr =
579 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
580 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
581
582 // Add some fake uses for OpenMP provided arguments.
583 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
584 Instruction *ZeroAddrUse = Builder.CreateLoad(Int32, ZeroAddr,
585 "zero.addr.use");
586 ToBeDeleted.push_back(ZeroAddrUse);
587
588 // ThenBB
589 // |
590 // V
591 // PRegionEntryBB <- Privatization allocas are placed here.
592 // |
593 // V
594 // PRegionBodyBB <- BodyGen is invoked here.
595 // |
596 // V
597 // PRegPreFiniBB <- The block we will start finalization from.
598 // |
599 // V
600 // PRegionExitBB <- A common exit to simplify block collection.
601 //
602
603 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");
9
Assuming 'DebugFlag' is false
10
Loop condition is false. Exiting loop
604
605 // Let the caller create the body.
606 assert(BodyGenCB && "Expected body generation callback!");
11
Assuming the condition is true
12
'?' condition is true
607 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
608 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);
609
610 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");
13
Assuming 'DebugFlag' is false
14
Loop condition is false. Exiting loop
611
612 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
613 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
15.1
'F' is null
15
Assuming the object is not a 'Function'
16
Taking false branch
614 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
615 llvm::LLVMContext &Ctx = F->getContext();
616 MDBuilder MDB(Ctx);
617 // Annotate the callback behavior of the __kmpc_fork_call:
618 // - The callback callee is argument number 2 (microtask).
619 // - The first two arguments of the callback callee are unknown (-1).
620 // - All variadic arguments to the __kmpc_fork_call are passed to the
621 // callback callee.
622 F->addMetadata(
623 llvm::LLVMContext::MD_callback,
624 *llvm::MDNode::get(
625 Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
626 /* VarArgsArePassed */ true)}));
627 }
628 }
629
630 OutlineInfo OI;
631 OI.PostOutlineCB = [=](Function &OutlinedFn) {
632 // Add some known attributes.
633 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
634 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
635 OutlinedFn.addFnAttr(Attribute::NoUnwind);
636 OutlinedFn.addFnAttr(Attribute::NoRecurse);
637
638 assert(OutlinedFn.arg_size() >= 2 &&
639 "Expected at least tid and bounded tid as arguments");
640 unsigned NumCapturedVars =
641 OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
642
643 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
644 CI->getParent()->setName("omp_parallel");
645 Builder.SetInsertPoint(CI);
646
647 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
648 Value *ForkCallArgs[] = {
649 Ident, Builder.getInt32(NumCapturedVars),
650 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};
651
652 SmallVector<Value *, 16> RealArgs;
653 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
654 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
655
656 Builder.CreateCall(RTLFn, RealArgs);
657
658 LLVM_DEBUG(dbgs() << "With fork_call placed: "
659 << *Builder.GetInsertBlock()->getParent() << "\n");
660
661 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());
662
663 // Initialize the local TID stack location with the argument value.
664 Builder.SetInsertPoint(PrivTID);
665 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
666 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);
667
668 // If no "if" clause was present we do not need the call created during
669 // outlining, otherwise we reuse it in the serialized parallel region.
670 if (!ElseTI) {
671 CI->eraseFromParent();
672 } else {
673
674 // If an "if" clause was present we are now generating the serialized
675 // version into the "else" branch.
676 Builder.SetInsertPoint(ElseTI);
677
678 // Build calls __kmpc_serialized_parallel(&Ident, GTid);
679 Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
680 Builder.CreateCall(
681 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
682 SerializedParallelCallArgs);
683
684 // OutlinedFn(&GTid, &zero, CapturedStruct);
685 CI->removeFromParent();
686 Builder.Insert(CI);
687
688 // __kmpc_end_serialized_parallel(&Ident, GTid);
689 Value *EndArgs[] = {Ident, ThreadID};
690 Builder.CreateCall(
691 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
692 EndArgs);
693
694 LLVM_DEBUG(dbgs() << "With serialized parallel region: "
695 << *Builder.GetInsertBlock()->getParent() << "\n");
696 }
697
698 for (Instruction *I : ToBeDeleted)
699 I->eraseFromParent();
700 };
701
702 // Adjust the finalization stack, verify the adjustment, and call the
703 // finalize function a last time to finalize values between the pre-fini
704 // block and the exit block if we left the parallel "the normal way".
705 auto FiniInfo = FinalizationStack.pop_back_val();
706 (void)FiniInfo;
707 assert(FiniInfo.DK == OMPD_parallel &&
17
Assuming 'OMPD_parallel' is equal to field 'DK'
18
'?' condition is true
708 "Unexpected finalization stack state!");
709
710 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
711
712 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
713 FiniCB(PreFiniIP);
714
715 OI.EntryBB = PRegEntryBB;
716 OI.ExitBB = PRegExitBB;
717
718 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
719 SmallVector<BasicBlock *, 32> Blocks;
720 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
721
722 // Ensure a single exit node for the outlined region by creating one.
723 // We might have multiple incoming edges to the exit now due to finalizations,
724 // e.g., cancel calls that cause the control flow to leave the region.
725 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
726 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
727 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
728 Blocks.push_back(PRegOutlinedExitBB);
729
730 CodeExtractorAnalysisCache CEAC(*OuterFn);
731 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
732 /* AggregateArgs */ false,
733 /* BlockFrequencyInfo */ nullptr,
734 /* BranchProbabilityInfo */ nullptr,
735 /* AssumptionCache */ nullptr,
736 /* AllowVarArgs */ true,
737 /* AllowAlloca */ true,
738 /* Suffix */ ".omp_par");
739
740 // Find inputs to, outputs from the code region.
741 BasicBlock *CommonExit = nullptr;
742 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
743 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
744 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
745
746 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");
19
Assuming 'DebugFlag' is false
20
Loop condition is false. Exiting loop
747
748 FunctionCallee TIDRTLFn =
749 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
750
751 auto PrivHelper = [&](Value &V) {
752 if (&V == TIDAddr || &V == ZeroAddr)
24
Assuming the condition is false
25
Assuming the condition is false
26
Taking false branch
753 return;
754
755 SetVector<Use *> Uses;
756 for (Use &U : V.uses())
757 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
758 if (ParallelRegionBlockSet.count(UserI->getParent()))
759 Uses.insert(&U);
760
761 // __kmpc_fork_call expects extra arguments as pointers. If the input
762 // already has a pointer type, everything is fine. Otherwise, store the
763 // value onto stack and load it back inside the to-be-outlined region. This
764 // will ensure only the pointer will be passed to the function.
765 // FIXME: if there are more than 15 trailing arguments, they must be
766 // additionally packed in a struct.
767 Value *Inner = &V;
768 if (!V.getType()->isPointerTy()) {
27
Taking true branch
769 IRBuilder<>::InsertPointGuard Guard(Builder);
770 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");
28
Assuming 'DebugFlag' is false
29
Loop condition is false. Exiting loop
771
772 Builder.restoreIP(OuterAllocaIP);
773 Value *Ptr =
774 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
775
776 // Store to stack at end of the block that currently branches to the entry
777 // block of the to-be-outlined region.
778 Builder.SetInsertPoint(InsertBB,
779 InsertBB->getTerminator()->getIterator());
780 Builder.CreateStore(&V, Ptr);
781
782 // Load back next to allocations in the to-be-outlined region.
783 Builder.restoreIP(InnerAllocaIP);
30
Calling 'IRBuilderBase::restoreIP'
35
Returning from 'IRBuilderBase::restoreIP'
784 Inner = Builder.CreateLoad(V.getType(), Ptr);
36
Calling 'IRBuilderBase::CreateLoad'
785 }
786
787 Value *ReplacementValue = nullptr;
788 CallInst *CI = dyn_cast<CallInst>(&V);
789 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
790 ReplacementValue = PrivTID;
791 } else {
792 Builder.restoreIP(
793 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
794 assert(ReplacementValue &&
795 "Expected copy/create callback to set replacement value!");
796 if (ReplacementValue == &V)
797 return;
798 }
799
800 for (Use *UPtr : Uses)
801 UPtr->set(ReplacementValue);
802 };
803
804 // Reset the inner alloca insertion as it will be used for loading the values
805 // wrapped into pointers before passing them into the to-be-outlined region.
806 // Configure it to insert immediately after the fake use of zero address so
807 // that they are available in the generated body and so that the
808 // OpenMP-related values (thread ID and zero address pointers) remain leading
809 // in the argument list.
810 InnerAllocaIP = IRBuilder<>::InsertPoint(
811 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
812
813 // Reset the outer alloca insertion point to the entry of the relevant block
814 // in case it was invalidated.
815 OuterAllocaIP = IRBuilder<>::InsertPoint(
816 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
817
818 for (Value *Input : Inputs) {
819 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
21
Assuming 'DebugFlag' is false
22
Loop condition is false. Exiting loop
820 PrivHelper(*Input);
23
Calling 'operator()'
821 }
822 LLVM_DEBUG({
823 for (Value *Output : Outputs)
824 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
825 });
826 assert(Outputs.empty() &&
827 "OpenMP outlining should not produce live-out values!");
828
829 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
830 LLVM_DEBUG({
831 for (auto *BB : Blocks)
832 dbgs() << " PBR: " << BB->getName() << "\n";
833 });
834
835 // Register the outlined info.
836 addOutlineInfo(std::move(OI));
837
838 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
839 UI->eraseFromParent();
840
841 return AfterIP;
842}
843
844void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
845 // Build call void __kmpc_flush(ident_t *loc)
846 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
847 Value *Args[] = {getOrCreateIdent(SrcLocStr)};
848
849 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
850}
851
852void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
853 if (!updateToLocation(Loc))
854 return;
855 emitFlush(Loc);
856}
857
858void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
859 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
860 // global_tid);
861 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
862 Value *Ident = getOrCreateIdent(SrcLocStr);
863 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
864
865 // Ignore return result until untied tasks are supported.
866 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
867 Args);
868}
869
870void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
871 if (!updateToLocation(Loc))
872 return;
873 emitTaskwaitImpl(Loc);
874}
875
876void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
877 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
878 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
879 Value *Ident = getOrCreateIdent(SrcLocStr);
880 Constant *I32Null = ConstantInt::getNullValue(Int32);
881 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
882
883 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
884 Args);
885}
886
887void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
888 if (!updateToLocation(Loc))
889 return;
890 emitTaskyieldImpl(Loc);
891}
892
893OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
894 const LocationDescription &Loc, InsertPointTy AllocaIP,
895 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
896 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
897 if (!updateToLocation(Loc))
898 return Loc.IP;
899
900 auto FiniCBWrapper = [&](InsertPointTy IP) {
901 if (IP.getBlock()->end() != IP.getPoint())
902 return FiniCB(IP);
903 // This must be done otherwise any nested constructs using FinalizeOMPRegion
904 // will fail because that function requires the Finalization Basic Block to
905 // have a terminator, which is already removed by EmitOMPRegionBody.
906 // IP is currently at cancelation block.
907 // We need to backtrack to the condition block to fetch
908 // the exit block and create a branch from cancelation
909 // to exit block.
910 IRBuilder<>::InsertPointGuard IPG(Builder);
911 Builder.restoreIP(IP);
912 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
913 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
914 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
915 Instruction *I = Builder.CreateBr(ExitBB);
916 IP = InsertPointTy(I->getParent(), I->getIterator());
917 return FiniCB(IP);
918 };
919
920 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
921
922 // Each section is emitted as a switch case
923 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
924 // -> OMP.createSection() which generates the IR for each section
925 // Iterate through all sections and emit a switch construct:
926 // switch (IV) {
927 // case 0:
928 // <SectionStmt[0]>;
929 // break;
930 // ...
931 // case <NumSection> - 1:
932 // <SectionStmt[<NumSection> - 1]>;
933 // break;
934 // }
935 // ...
936 // section_loop.after:
937 // <FiniCB>;
938 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
939 auto *CurFn = CodeGenIP.getBlock()->getParent();
940 auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
941 auto *ForExitBB = CodeGenIP.getBlock()
942 ->getSinglePredecessor()
943 ->getTerminator()
944 ->getSuccessor(1);
945 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
946 Builder.restoreIP(CodeGenIP);
947 unsigned CaseNumber = 0;
948 for (auto SectionCB : SectionCBs) {
949 auto *CaseBB = BasicBlock::Create(M.getContext(),
950 "omp_section_loop.body.case", CurFn);
951 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
952 Builder.SetInsertPoint(CaseBB);
953 SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
954 CaseNumber++;
955 }
956 // remove the existing terminator from body BB since there can be no
957 // terminators after switch/case
958 CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
959 };
960 // Loop body ends here
961 // LowerBound, UpperBound, and Stride for createCanonicalLoop
962 Type *I32Ty = Type::getInt32Ty(M.getContext());
963 Value *LB = ConstantInt::get(I32Ty, 0);
964 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
965 Value *ST = ConstantInt::get(I32Ty, 1);
966 llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
967 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
968 LoopInfo = createStaticWorkshareLoop(Loc, LoopInfo, AllocaIP, true);
969 BasicBlock *LoopAfterBB = LoopInfo->getAfter();
970 Instruction *SplitPos = LoopAfterBB->getTerminator();
971 if (!isa_and_nonnull<BranchInst>(SplitPos))
972 SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
973 // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
974 // which requires a BB with branch
975 BasicBlock *ExitBB =
976 LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
977 SplitPos->eraseFromParent();
978
979 // Apply the finalization callback in LoopAfterBB
980 auto FiniInfo = FinalizationStack.pop_back_val();
981 assert(FiniInfo.DK == OMPD_sections &&
982        "Unexpected finalization stack state!");
983 Builder.SetInsertPoint(LoopAfterBB->getTerminator());
984 FiniInfo.FiniCB(Builder.saveIP());
985 Builder.SetInsertPoint(ExitBB);
986
987 return Builder.saveIP();
988}
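// For orientation, a rough sketch of what createSections above produces
// (simplified, names approximate): the sections become a statically
// workshared loop over the section index, with the switch selecting the body.
//
//   for (iv = 0; iv < NumSections; ++iv) {   // bounds adjusted by
//     switch (iv) {                          // __kmpc_for_static_init_*
//     case 0: <SectionStmt[0]>; break;
//     // ...
//     case NumSections - 1: <SectionStmt[NumSections - 1]>; break;
//     }
//   }
//   omp_sections.end:
//     <FiniCB>;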
989
990OpenMPIRBuilder::InsertPointTy
991OpenMPIRBuilder::createSection(const LocationDescription &Loc,
992 BodyGenCallbackTy BodyGenCB,
993 FinalizeCallbackTy FiniCB) {
994 if (!updateToLocation(Loc))
995 return Loc.IP;
996
997 auto FiniCBWrapper = [&](InsertPointTy IP) {
998 if (IP.getBlock()->end() != IP.getPoint())
999 return FiniCB(IP);
1000 // This must be done, otherwise any nested constructs using FinalizeOMPRegion
1001 // will fail because that function requires the finalization basic block to
1002 // have a terminator, which has already been removed by EmitOMPRegionBody.
1003 // IP is currently at the cancellation block.
1004 // We need to backtrack to the condition block to fetch
1005 // the exit block and create a branch from the cancellation
1006 // block to the exit block.
1007 IRBuilder<>::InsertPointGuard IPG(Builder);
1008 Builder.restoreIP(IP);
1009 auto *CaseBB = Loc.IP.getBlock();
1010 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1011 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1012 Instruction *I = Builder.CreateBr(ExitBB);
1013 IP = InsertPointTy(I->getParent(), I->getIterator());
1014 return FiniCB(IP);
1015 };
1016
1017 Directive OMPD = Directive::OMPD_sections;
1018 // Since we are using Finalization Callback here, HasFinalize
1019 // and IsCancellable have to be true
1020 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
1021 /*Conditional*/ false, /*hasFinalize*/ true,
1022 /*IsCancellable*/ true);
1023}
1024
1025OpenMPIRBuilder::InsertPointTy
1026OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
1027 BodyGenCallbackTy BodyGenCB,
1028 FinalizeCallbackTy FiniCB) {
1029
1030 if (!updateToLocation(Loc))
1031 return Loc.IP;
1032
1033 Directive OMPD = Directive::OMPD_master;
1034 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1035 Value *Ident = getOrCreateIdent(SrcLocStr);
1036 Value *ThreadId = getOrCreateThreadID(Ident);
1037 Value *Args[] = {Ident, ThreadId};
1038
1039 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
1040 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1041
1042 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
1043 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1044
1045 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1046 /*Conditional*/ true, /*hasFinalize*/ true);
1047}
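// Roughly, the construct emitted by createMaster is (sketch only, argument
// names approximate):
//
//   if (__kmpc_master(&loc, tid)) {
//     ... master region (BodyGenCB) ...
//     __kmpc_end_master(&loc, tid);
//   }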
1048
1049OpenMPIRBuilder::InsertPointTy
1050OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
1051 BodyGenCallbackTy BodyGenCB,
1052 FinalizeCallbackTy FiniCB, Value *Filter) {
1053 if (!updateToLocation(Loc))
1054 return Loc.IP;
1055
1056 Directive OMPD = Directive::OMPD_masked;
1057 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1058 Value *Ident = getOrCreateIdent(SrcLocStr);
1059 Value *ThreadId = getOrCreateThreadID(Ident);
1060 Value *Args[] = {Ident, ThreadId, Filter};
1061 Value *ArgsEnd[] = {Ident, ThreadId};
1062
1063 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
1064 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1065
1066 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
1067 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
1068
1069 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1070 /*Conditional*/ true, /*hasFinalize*/ true);
1071}
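// The masked construct is analogous (sketch only); the filter only affects
// the entry call:
//
//   if (__kmpc_masked(&loc, tid, filter)) {
//     ... masked region (BodyGenCB) ...
//     __kmpc_end_masked(&loc, tid);
//   }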
1072
1073CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
1074 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
1075 BasicBlock *PostInsertBefore, const Twine &Name) {
1076 Module *M = F->getParent();
1077 LLVMContext &Ctx = M->getContext();
1078 Type *IndVarTy = TripCount->getType();
1079
1080 // Create the basic block structure.
1081 BasicBlock *Preheader =
1082 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
1083 BasicBlock *Header =
1084 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
1085 BasicBlock *Cond =
1086 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
1087 BasicBlock *Body =
1088 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
1089 BasicBlock *Latch =
1090 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
1091 BasicBlock *Exit =
1092 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
1093 BasicBlock *After =
1094 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
1095
1096 // Use specified DebugLoc for new instructions.
1097 Builder.SetCurrentDebugLocation(DL);
1098
1099 Builder.SetInsertPoint(Preheader);
1100 Builder.CreateBr(Header);
1101
1102 Builder.SetInsertPoint(Header);
1103 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
1104 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
1105 Builder.CreateBr(Cond);
1106
1107 Builder.SetInsertPoint(Cond);
1108 Value *Cmp =
1109 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
1110 Builder.CreateCondBr(Cmp, Body, Exit);
1111
1112 Builder.SetInsertPoint(Body);
1113 Builder.CreateBr(Latch);
1114
1115 Builder.SetInsertPoint(Latch);
1116 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
1117 "omp_" + Name + ".next", /*HasNUW=*/true);
1118 Builder.CreateBr(Header);
1119 IndVarPHI->addIncoming(Next, Latch);
1120
1121 Builder.SetInsertPoint(Exit);
1122 Builder.CreateBr(After);
1123
1124 // Remember and return the canonical control flow.
1125 LoopInfos.emplace_front();
1126 CanonicalLoopInfo *CL = &LoopInfos.front();
1127
1128 CL->Preheader = Preheader;
1129 CL->Header = Header;
1130 CL->Cond = Cond;
1131 CL->Body = Body;
1132 CL->Latch = Latch;
1133 CL->Exit = Exit;
1134 CL->After = After;
1135
1136 CL->IsValid = true;
1137
1138#ifndef NDEBUG
1139 CL->assertOK();
1140#endif
1141 return CL;
1142}
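// The skeleton built above corresponds to the following CFG (rough sketch of
// the generated IR, before any body code is inserted):
//
//   preheader:  br header
//   header:     %iv = phi [0, preheader], [%next, inc]; br cond
//   cond:       %cmp = icmp ult %iv, %tripcount; br %cmp, body, exit
//   body:       br inc
//   inc:        %next = add nuw %iv, 1; br header
//   exit:       br after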
1143
1144CanonicalLoopInfo *
1145OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
1146 LoopBodyGenCallbackTy BodyGenCB,
1147 Value *TripCount, const Twine &Name) {
1148 BasicBlock *BB = Loc.IP.getBlock();
1149 BasicBlock *NextBB = BB->getNextNode();
1150
1151 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
1152 NextBB, NextBB, Name);
1153 BasicBlock *After = CL->getAfter();
1154
1155 // If location is not set, don't connect the loop.
1156 if (updateToLocation(Loc)) {
1157 // Split the loop at the insertion point: Branch to the preheader and move
1158 // every following instruction to after the loop (the After BB). Also, the
1159 // new successor is the loop's after block.
1160 Builder.CreateBr(CL->Preheader);
1161 After->getInstList().splice(After->begin(), BB->getInstList(),
1162 Builder.GetInsertPoint(), BB->end());
1163 After->replaceSuccessorsPhiUsesWith(BB, After);
1164 }
1165
1166 // Emit the body content. We do it after connecting the loop to the CFG to
1167 // avoid that the callback encounters degenerate BBs.
1168 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
1169
1170#ifndef NDEBUG
1171 CL->assertOK();
1172#endif
1173 return CL;
1174}
1175
1176CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
1177 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
1178 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
1179 InsertPointTy ComputeIP, const Twine &Name) {
1180
1181 // Consider the following difficulties (assuming 8-bit signed integers):
1182 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
1183 // DO I = 1, 100, 50
1184 //  * A \p Step of INT_MIN cannot be normalized to a positive direction:
1185 // DO I = 100, 0, -128
1186
1187 // Start, Stop and Step must be of the same integer type.
1188 auto *IndVarTy = cast<IntegerType>(Start->getType());
1189 assert(IndVarTy == Stop->getType() && "Stop type mismatch");
1190 assert(IndVarTy == Step->getType() && "Step type mismatch");
1191
1192 LocationDescription ComputeLoc =
1193 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
1194 updateToLocation(ComputeLoc);
1195
1196 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
1197 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
1198
1199 // Like Step, but always positive.
1200 Value *Incr = Step;
1201
1202 // Distance between Start and Stop; always positive.
1203 Value *Span;
1204
1205 // Condition for whether no iterations are executed at all, e.g. because
1206 // UB < LB.
1207 Value *ZeroCmp;
1208
1209 if (IsSigned) {
1210 // Ensure that increment is positive. If not, negate and invert LB and UB.
1211 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
1212 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
1213 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
1214 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
1215 Span = Builder.CreateSub(UB, LB, "", false, true);
1216 ZeroCmp = Builder.CreateICmp(
1217 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
1218 } else {
1219 Span = Builder.CreateSub(Stop, Start, "", true);
1220 ZeroCmp = Builder.CreateICmp(
1221 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
1222 }
1223
1224 Value *CountIfLooping;
1225 if (InclusiveStop) {
1226 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
1227 } else {
1228 // Avoid incrementing past stop since it could overflow.
1229 Value *CountIfTwo = Builder.CreateAdd(
1230 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
1231 Value *OneCmp = Builder.CreateICmp(
1232 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
1233 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
1234 }
1235 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
1236 "omp_" + Name + ".tripcount");
1237
1238 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
1239 Builder.restoreIP(CodeGenIP);
1240 Value *Span = Builder.CreateMul(IV, Step);
1241 Value *IndVar = Builder.CreateAdd(Span, Start);
1242 BodyGenCB(Builder.saveIP(), IndVar);
1243 };
1244 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
1245 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
1246}
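// Worked example for the trip count logic above (unsigned, exclusive stop,
// values chosen for illustration): Start=0, Stop=10, Step=3 gives Span=10,
// CountIfTwo = (10-1)/3 + 1 = 4 and TripCount = 4, matching the iterations
// 0, 3, 6, 9; the BodyGen wrapper then rematerializes the user induction
// variable as IV * Step + Start.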
1247
1248// Returns an LLVM function to call for initializing loop bounds using OpenMP
1249// static scheduling depending on `type`. Only i32 and i64 are supported by the
1250// runtime. Always interpret integers as unsigned similarly to
1251// CanonicalLoopInfo.
1252static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
1253 OpenMPIRBuilder &OMPBuilder) {
1254 unsigned Bitwidth = Ty->getIntegerBitWidth();
1255 if (Bitwidth == 32)
1256 return OMPBuilder.getOrCreateRuntimeFunction(
1257 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
1258 if (Bitwidth == 64)
1259 return OMPBuilder.getOrCreateRuntimeFunction(
1260 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
1261 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1262}
1263
1264// Sets the number of loop iterations to the given value. This value must be
1265// valid in the condition block (i.e., defined in the preheader) and is
1266// interpreted as an unsigned integer.
1267void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) {
1268 Instruction *CmpI = &CLI->getCond()->front();
1269 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
1270 CmpI->setOperand(1, TripCount);
1271 CLI->assertOK();
1272}
1273
1274CanonicalLoopInfo *OpenMPIRBuilder::createStaticWorkshareLoop(
1275 const LocationDescription &Loc, CanonicalLoopInfo *CLI,
1276 InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk) {
1277 // Set up the source location value for OpenMP runtime.
1278 if (!updateToLocation(Loc))
1279 return nullptr;
1280
1281 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1282 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1283
1284 // Declare useful OpenMP runtime functions.
1285 Value *IV = CLI->getIndVar();
1286 Type *IVTy = IV->getType();
1287 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
1288 FunctionCallee StaticFini =
1289 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
1290
1291 // Allocate space for computed loop bounds as expected by the "init" function.
1292 Builder.restoreIP(AllocaIP);
1293 Type *I32Type = Type::getInt32Ty(M.getContext());
1294 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1295 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1296 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1297 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1298
1299 // At the end of the preheader, prepare for calling the "init" function by
1300 // storing the current loop bounds into the allocated space. A canonical loop
1301 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1302 // and produces an inclusive upper bound.
1303 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
1304 Constant *Zero = ConstantInt::get(IVTy, 0);
1305 Constant *One = ConstantInt::get(IVTy, 1);
1306 Builder.CreateStore(Zero, PLowerBound);
1307 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
1308 Builder.CreateStore(UpperBound, PUpperBound);
1309 Builder.CreateStore(One, PStride);
1310
1311 if (!Chunk)
1312 Chunk = One;
1313
1314 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1315
1316 Constant *SchedulingType =
1317 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static));
1318
1319 // Call the "init" function and update the trip count of the loop with the
1320 // value it produced.
1321 Builder.CreateCall(StaticInit,
1322 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
1323 PUpperBound, PStride, One, Chunk});
1324 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
1325 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
1326 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
1327 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
1328 setCanonicalLoopTripCount(CLI, TripCount);
1329
1330 // Update all uses of the induction variable except the one in the condition
1331 // block that compares it with the actual upper bound, and the increment in
1332 // the latch block.
1333 // TODO: this can eventually move to CanonicalLoopInfo or to a new
1334 // CanonicalLoopInfoUpdater interface.
1335 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt());
1336 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound);
1337 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) {
1338 auto *Instr = dyn_cast<Instruction>(U.getUser());
1339 return !Instr ||
1340 (Instr->getParent() != CLI->getCond() &&
1341 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV);
1342 });
1343
1344 // In the "exit" block, call the "fini" function.
1345 Builder.SetInsertPoint(CLI->getExit(),
1346 CLI->getExit()->getTerminator()->getIterator());
1347 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
1348
1349 // Add the barrier if requested.
1350 if (NeedsBarrier)
1351 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
1352 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1353 /* CheckCancelFlag */ false);
1354
1355 CLI->assertOK();
1356 return CLI;
1357}
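// Net effect of createStaticWorkshareLoop (rough sketch): the preheader now
// ends with
//   __kmpc_for_static_init_{4u,8u}(&loc, tid, /*schedtype=*/Static,
//       &p.lastiter, &p.lowerbound, &p.upperbound, &p.stride, 1, chunk);
// the loop's trip count is rewritten to ub - lb + 1, body uses of the
// induction variable see iv + lb, and the exit block calls
// __kmpc_for_static_fini(&loc, tid), optionally followed by a barrier.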
1358
1359CanonicalLoopInfo *OpenMPIRBuilder::createWorkshareLoop(
1360 const LocationDescription &Loc, CanonicalLoopInfo *CLI,
1361 InsertPointTy AllocaIP, bool NeedsBarrier) {
1362 // Currently only supports static schedules.
1363 return createStaticWorkshareLoop(Loc, CLI, AllocaIP, NeedsBarrier);
1364}
1365
1366/// Returns an LLVM function to call for initializing loop bounds using OpenMP
1367/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1368/// the runtime. Always interpret integers as unsigned similarly to
1369/// CanonicalLoopInfo.
1370static FunctionCallee
1371getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1372 unsigned Bitwidth = Ty->getIntegerBitWidth();
1373 if (Bitwidth == 32)
1374 return OMPBuilder.getOrCreateRuntimeFunction(
1375 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
1376 if (Bitwidth == 64)
1377 return OMPBuilder.getOrCreateRuntimeFunction(
1378 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
1379 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1380}
1381
1382/// Returns an LLVM function to call for updating the next loop using OpenMP
1383/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1384/// the runtime. Always interpret integers as unsigned similarly to
1385/// CanonicalLoopInfo.
1386static FunctionCallee
1387getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1388 unsigned Bitwidth = Ty->getIntegerBitWidth();
1389 if (Bitwidth == 32)
1390 return OMPBuilder.getOrCreateRuntimeFunction(
1391 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
1392 if (Bitwidth == 64)
1393 return OMPBuilder.getOrCreateRuntimeFunction(
1394 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
1395 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1396}
1397
1398OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createDynamicWorkshareLoop(
1399 const LocationDescription &Loc, CanonicalLoopInfo *CLI,
1400 InsertPointTy AllocaIP, OMPScheduleType SchedType, bool NeedsBarrier,
1401 Value *Chunk) {
1402 // Set up the source location value for OpenMP runtime.
1403 Builder.SetCurrentDebugLocation(Loc.DL);
1404
1405 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1406 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1407
1408 // Declare useful OpenMP runtime functions.
1409 Value *IV = CLI->getIndVar();
1410 Type *IVTy = IV->getType();
1411 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
1412 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);
1413
1414 // Allocate space for computed loop bounds as expected by the "init" function.
1415 Builder.restoreIP(AllocaIP);
1416 Type *I32Type = Type::getInt32Ty(M.getContext());
1417 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1418 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1419 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1420 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1421
1422 // At the end of the preheader, prepare for calling the "init" function by
1423 // storing the current loop bounds into the allocated space. A canonical loop
1424 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1425 // and produces an inclusive upper bound.
1426 BasicBlock *PreHeader = CLI->getPreheader();
1427 Builder.SetInsertPoint(PreHeader->getTerminator());
1428 Constant *One = ConstantInt::get(IVTy, 1);
1429 Builder.CreateStore(One, PLowerBound);
1430 Value *UpperBound = CLI->getTripCount();
1431 Builder.CreateStore(UpperBound, PUpperBound);
1432 Builder.CreateStore(One, PStride);
1433
1434 BasicBlock *Header = CLI->getHeader();
1435 BasicBlock *Exit = CLI->getExit();
1436 BasicBlock *Cond = CLI->getCond();
1437 InsertPointTy AfterIP = CLI->getAfterIP();
1438
1439 // The CLI will be "broken" in the code below, as the loop is no longer
1440 // a valid canonical loop.
1441
1442 if (!Chunk)
1443 Chunk = One;
1444
1445 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1446
1447 Constant *SchedulingType =
1448 ConstantInt::get(I32Type, static_cast<int>(SchedType));
1449
1450 // Call the "init" function.
1451 Builder.CreateCall(DynamicInit,
1452 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
1453 UpperBound, /* step */ One, Chunk});
1454
1455 // An outer loop around the existing one.
1456 BasicBlock *OuterCond = BasicBlock::Create(
1457 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
1458 PreHeader->getParent());
1459 // This needs to be 32-bit always, so can't use the IVTy Zero above.
1460 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
1461 Value *Res =
1462 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
1463 PLowerBound, PUpperBound, PStride});
1464 Constant *Zero32 = ConstantInt::get(I32Type, 0);
1465 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
1466 Value *LowerBound =
1467 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
1468 Builder.CreateCondBr(MoreWork, Header, Exit);
1469
1470 // Change PHI-node in loop header to use outer cond rather than preheader,
1471 // and set IV to the LowerBound.
1472 Instruction *Phi = &Header->front();
1473 auto *PI = cast<PHINode>(Phi);
1474 PI->setIncomingBlock(0, OuterCond);
1475 PI->setIncomingValue(0, LowerBound);
1476
1477 // Then set the pre-header to jump to the OuterCond
1478 Instruction *Term = PreHeader->getTerminator();
1479 auto *Br = cast<BranchInst>(Term);
1480 Br->setSuccessor(0, OuterCond);
1481
1482 // Modify the inner condition:
1483 // * Use the UpperBound returned from the DynamicNext call.
1484 // * Jump to the outer loop when done with one of the inner loops.
1485 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
1486 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
1487 Instruction *Comp = &*Builder.GetInsertPoint();
1488 auto *CI = cast<CmpInst>(Comp);
1489 CI->setOperand(1, UpperBound);
1490 // Redirect the inner exit to branch to outer condition.
1491 Instruction *Branch = &Cond->back();
1492 auto *BI = cast<BranchInst>(Branch);
1493 assert(BI->getSuccessor(1) == Exit);
1494 BI->setSuccessor(1, OuterCond);
1495
1496 // Add the barrier if requested.
1497 if (NeedsBarrier) {
1498 Builder.SetInsertPoint(&Exit->back());
1499 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
1500 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1501 /* CheckCancelFlag */ false);
1502 }
1503
1504 return AfterIP;
1505}
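// Rough sketch of the structure left behind by createDynamicWorkshareLoop:
// an outer dispatch loop wraps the original canonical loop.
//
//   preheader:   __kmpc_dispatch_init_*(&loc, tid, sched, 1, tripcount, 1, chunk)
//                br outer.cond
//   outer.cond:  %more = __kmpc_dispatch_next_*(&loc, tid, &p.lastiter,
//                                               &p.lowerbound, &p.upperbound,
//                                               &p.stride)
//                br (%more != 0), header, exit   ; header PHI starts at lb - 1
//   cond:        br (iv < ub), body, outer.cond  ; ub reloaded from p.upperbound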
1506
1507/// Make \p Source branch to \p Target.
1508///
1509/// Handles two situations:
1510/// * \p Source already has an unconditional branch.
1511/// * \p Source is a degenerate block (no terminator because the BB is
1512/// the current head of the IR construction).
1513static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
1514 if (Instruction *Term = Source->getTerminator()) {
1515 auto *Br = cast<BranchInst>(Term);
1516 assert(!Br->isConditional() &&
1517        "BB's terminator must be an unconditional branch (or degenerate)");
1518 BasicBlock *Succ = Br->getSuccessor(0);
1519 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
1520 Br->setSuccessor(0, Target);
1521 return;
1522 }
1523
1524 auto *NewBr = BranchInst::Create(Target, Source);
1525 NewBr->setDebugLoc(DL);
1526}
1527
1528/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
1529/// after this \p OldTarget will be orphaned.
1530static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
1531 BasicBlock *NewTarget, DebugLoc DL) {
1532 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
1533 redirectTo(Pred, NewTarget, DL);
1534}
1535
1536/// Determine which blocks in \p BBs are reachable from outside and remove the
1537/// ones that are not reachable from the function.
1538static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
1539 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
1540 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
1541 for (Use &U : BB->uses()) {
1542 auto *UseInst = dyn_cast<Instruction>(U.getUser());
1543 if (!UseInst)
1544 continue;
1545 if (BBsToErase.count(UseInst->getParent()))
1546 continue;
1547 return true;
1548 }
1549 return false;
1550 };
1551
1552 while (true) {
1553 bool Changed = false;
1554 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
1555 if (HasRemainingUses(BB)) {
1556 BBsToErase.erase(BB);
1557 Changed = true;
1558 }
1559 }
1560 if (!Changed)
1561 break;
1562 }
1563
1564 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
1565 DeleteDeadBlocks(BBVec);
1566}
1567
1568CanonicalLoopInfo *
1569OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1570 InsertPointTy ComputeIP) {
1571 assert(Loops.size() >= 1 && "At least one loop required");
1572 size_t NumLoops = Loops.size();
1573
1574 // Nothing to do if there is already just one loop.
1575 if (NumLoops == 1)
1576 return Loops.front();
1577
1578 CanonicalLoopInfo *Outermost = Loops.front();
1579 CanonicalLoopInfo *Innermost = Loops.back();
1580 BasicBlock *OrigPreheader = Outermost->getPreheader();
1581 BasicBlock *OrigAfter = Outermost->getAfter();
1582 Function *F = OrigPreheader->getParent();
1583
1584 // Setup the IRBuilder for inserting the trip count computation.
1585 Builder.SetCurrentDebugLocation(DL);
1586 if (ComputeIP.isSet())
1587 Builder.restoreIP(ComputeIP);
1588 else
1589 Builder.restoreIP(Outermost->getPreheaderIP());
1590
1591 // Derive the collapsed loop's trip count.
1592 // TODO: Find common/largest indvar type.
1593 Value *CollapsedTripCount = nullptr;
1594 for (CanonicalLoopInfo *L : Loops) {
1595 Value *OrigTripCount = L->getTripCount();
1596 if (!CollapsedTripCount) {
1597 CollapsedTripCount = OrigTripCount;
1598 continue;
1599 }
1600
1601 // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
1602 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
1603 {}, /*HasNUW=*/true);
1604 }
1605
1606 // Create the collapsed loop control flow.
1607 CanonicalLoopInfo *Result =
1608 createLoopSkeleton(DL, CollapsedTripCount, F,
1609 OrigPreheader->getNextNode(), OrigAfter, "collapsed");
1610
1611 // Build the collapsed loop body code.
1612 // Start with deriving the input loop induction variables from the collapsed
1613 // one, using a divmod scheme. To preserve the original loops' order, the
1614 // innermost loop uses the least significant bits.
1615 Builder.restoreIP(Result->getBodyIP());
1616
1617 Value *Leftover = Result->getIndVar();
1618 SmallVector<Value *> NewIndVars;
1619 NewIndVars.set_size(NumLoops);
1620 for (int i = NumLoops - 1; i >= 1; --i) {
1621 Value *OrigTripCount = Loops[i]->getTripCount();
1622
1623 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
1624 NewIndVars[i] = NewIndVar;
1625
1626 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
1627 }
1628 // Outermost loop gets all the remaining bits.
1629 NewIndVars[0] = Leftover;
1630
1631 // Construct the loop body control flow.
1632 // We progressively construct the branch structure following the direction of
1633 // the control flow, from the leading in-between code, the loop nest body, the
1634 // trailing in-between code, and rejoining the collapsed loop's latch.
1635 // ContinueBlock and ContinuePred keep track of the source(s) of next edge. If
1636 // the ContinueBlock is set, continue with that block. If ContinuePred, use
1637 // its predecessors as sources.
1638 BasicBlock *ContinueBlock = Result->getBody();
1639 BasicBlock *ContinuePred = nullptr;
1640 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
1641 BasicBlock *NextSrc) {
1642 if (ContinueBlock)
1643 redirectTo(ContinueBlock, Dest, DL);
1644 else
1645 redirectAllPredecessorsTo(ContinuePred, Dest, DL);
1646
1647 ContinueBlock = nullptr;
1648 ContinuePred = NextSrc;
1649 };
1650
1651 // The code before the nested loop of each level.
1652 // Because we are sinking it into the nest, it will be executed more often
1653 // than in the original loop. More sophisticated schemes could keep track of what
1654 // the in-between code is and instantiate it only once per thread.
1655 for (size_t i = 0; i < NumLoops - 1; ++i)
1656 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());
1657
1658 // Connect the loop nest body.
1659 ContinueWith(Innermost->getBody(), Innermost->getLatch());
1660
1661 // The code after the nested loop at each level.
1662 for (size_t i = NumLoops - 1; i > 0; --i)
1663 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());
1664
1665 // Connect the finished loop to the collapsed loop latch.
1666 ContinueWith(Result->getLatch(), nullptr);
1667
1668 // Replace the input loops with the new collapsed loop.
1669 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
1670 redirectTo(Result->getAfter(), Outermost->getAfter(), DL);
1671
1672 // Replace the input loop indvars with the derived ones.
1673 for (size_t i = 0; i < NumLoops; ++i)
1674 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);
1675
1676 // Remove unused parts of the input loops.
1677 SmallVector<BasicBlock *, 12> OldControlBBs;
1678 OldControlBBs.reserve(6 * Loops.size());
1679 for (CanonicalLoopInfo *Loop : Loops)
1680 Loop->collectControlBlocks(OldControlBBs);
1681 removeUnusedBlocksFromParent(OldControlBBs);
1682
1683#ifndef NDEBUG
1684 Result->assertOK();
1685#endif
1686 return Result;
1687}
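// Worked example for collapseLoops (two loops, trip counts N0 and N1): the
// collapsed loop runs N0 * N1 iterations and its induction variable IV is
// decomposed as
//   i1 = IV % N1;   // innermost, least significant
//   i0 = IV / N1;   // outermost gets the remaining bits
// before replacing the original induction variables.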
1688
1689std::vector<CanonicalLoopInfo *>
1690OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1691 ArrayRef<Value *> TileSizes) {
1692 assert(TileSizes.size() == Loops.size() &&
1693        "Must pass as many tile sizes as there are loops");
1694 int NumLoops = Loops.size();
1695 assert(NumLoops >= 1 && "At least one loop to tile required");
1696
1697 CanonicalLoopInfo *OutermostLoop = Loops.front();
1698 CanonicalLoopInfo *InnermostLoop = Loops.back();
1699 Function *F = OutermostLoop->getBody()->getParent();
1700 BasicBlock *InnerEnter = InnermostLoop->getBody();
1701 BasicBlock *InnerLatch = InnermostLoop->getLatch();
1702
1703 // Collect original trip counts and induction variable to be accessible by
1704 // index. Also, the structure of the original loops is not preserved during
1705 // the construction of the tiled loops, so do it before we scavenge the BBs of
1706 // any original CanonicalLoopInfo.
1707 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
1708 for (CanonicalLoopInfo *L : Loops) {
1709 OrigTripCounts.push_back(L->getTripCount());
1710 OrigIndVars.push_back(L->getIndVar());
1711 }
1712
1713 // Collect the code between loop headers. These may contain SSA definitions
1714 // that are used in the loop nest body. To be usable within the innermost
1715 // body, these BasicBlocks will be sunk into the loop nest body. That is,
1716 // these instructions may be executed more often than before the tiling.
1717 // TODO: It would be sufficient to only sink them into body of the
1718 // corresponding tile loop.
1719 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
1720 for (int i = 0; i < NumLoops - 1; ++i) {
1721 CanonicalLoopInfo *Surrounding = Loops[i];
1722 CanonicalLoopInfo *Nested = Loops[i + 1];
1723
1724 BasicBlock *EnterBB = Surrounding->getBody();
1725 BasicBlock *ExitBB = Nested->getHeader();
1726 InbetweenCode.emplace_back(EnterBB, ExitBB);
1727 }
1728
1729 // Compute the trip counts of the floor loops.
1730 Builder.SetCurrentDebugLocation(DL);
1731 Builder.restoreIP(OutermostLoop->getPreheaderIP());
1732 SmallVector<Value *, 4> FloorCount, FloorRems;
1733 for (int i = 0; i < NumLoops; ++i) {
1734 Value *TileSize = TileSizes[i];
1735 Value *OrigTripCount = OrigTripCounts[i];
1736 Type *IVType = OrigTripCount->getType();
1737
1738 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
1739 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
1740
1741 // 0 if the tilesize divides the tripcount, 1 otherwise.
1742 // 1 means we need an additional iteration for a partial tile.
1743 //
1744 // Unfortunately we cannot just use the roundup-formula
1745 // (tripcount + tilesize - 1)/tilesize
1746 // because the summation might overflow. We do not want to introduce undefined
1747 // behavior when the untiled loop nest did not.
1748 Value *FloorTripOverflow =
1749 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
1750
1751 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
1752 FloorTripCount =
1753 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
1754 "omp_floor" + Twine(i) + ".tripcount", true);
1755
1756 // Remember some values for later use.
1757 FloorCount.push_back(FloorTripCount);
1758 FloorRems.push_back(FloorTripRem);
1759 }
1760
1761 // Generate the new loop nest, from the outermost to the innermost.
1762 std::vector<CanonicalLoopInfo *> Result;
1763 Result.reserve(NumLoops * 2);
1764
1765 // The basic block of the surrounding loop that enters the next generated
1766 // loop.
1767 BasicBlock *Enter = OutermostLoop->getPreheader();
1768
1769 // The basic block of the surrounding loop where the inner code should
1770 // continue.
1771 BasicBlock *Continue = OutermostLoop->getAfter();
1772
1773 // Where the next loop basic block should be inserted.
1774 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
1775
1776 auto EmbeddNewLoop =
1777 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
1778 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
1779 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
1780 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
1781 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
1782 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
1783
1784 // Setup the position where the next embedded loop connects to this loop.
1785 Enter = EmbeddedLoop->getBody();
1786 Continue = EmbeddedLoop->getLatch();
1787 OutroInsertBefore = EmbeddedLoop->getLatch();
1788 return EmbeddedLoop;
1789 };
1790
1791 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
1792 const Twine &NameBase) {
1793 for (auto P : enumerate(TripCounts)) {
1794 CanonicalLoopInfo *EmbeddedLoop =
1795 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
1796 Result.push_back(EmbeddedLoop);
1797 }
1798 };
1799
1800 EmbeddNewLoops(FloorCount, "floor");
1801
1802 // Within the innermost floor loop, emit the code that computes the tile
1803 // sizes.
1804 Builder.SetInsertPoint(Enter->getTerminator());
1805 SmallVector<Value *, 4> TileCounts;
1806 for (int i = 0; i < NumLoops; ++i) {
1807 CanonicalLoopInfo *FloorLoop = Result[i];
1808 Value *TileSize = TileSizes[i];
1809
1810 Value *FloorIsEpilogue =
1811 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
1812 Value *TileTripCount =
1813 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
1814
1815 TileCounts.push_back(TileTripCount);
1816 }
1817
1818 // Create the tile loops.
1819 EmbeddNewLoops(TileCounts, "tile");
1820
1821 // Insert the inbetween code into the body.
1822 BasicBlock *BodyEnter = Enter;
1823 BasicBlock *BodyEntered = nullptr;
1824 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
1825 BasicBlock *EnterBB = P.first;
1826 BasicBlock *ExitBB = P.second;
1827
1828 if (BodyEnter)
1829 redirectTo(BodyEnter, EnterBB, DL);
1830 else
1831 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
1832
1833 BodyEnter = nullptr;
1834 BodyEntered = ExitBB;
1835 }
1836
1837 // Append the original loop nest body into the generated loop nest body.
1838 if (BodyEnter)
1839 redirectTo(BodyEnter, InnerEnter, DL);
1840 else
1841 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
1842 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
1843
1844 // Replace the original induction variable with an induction variable computed
1845 // from the tile and floor induction variables.
1846 Builder.restoreIP(Result.back()->getBodyIP());
1847 for (int i = 0; i < NumLoops; ++i) {
1848 CanonicalLoopInfo *FloorLoop = Result[i];
1849 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
1850 Value *OrigIndVar = OrigIndVars[i];
1851 Value *Size = TileSizes[i];
1852
1853 Value *Scale =
1854 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
1855 Value *Shift =
1856 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
1857 OrigIndVar->replaceAllUsesWith(Shift);
1858 }
1859
1860 // Remove unused parts of the original loops.
1861 SmallVector<BasicBlock *, 12> OldControlBBs;
1862 OldControlBBs.reserve(6 * Loops.size());
1863 for (CanonicalLoopInfo *Loop : Loops)
1864 Loop->collectControlBlocks(OldControlBBs);
1865 removeUnusedBlocksFromParent(OldControlBBs);
1866
1867#ifndef NDEBUG
1868 for (CanonicalLoopInfo *GenL : Result)
1869 GenL->assertOK();
1870#endif
1871 return Result;
1872}
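// Worked example for tileLoops (one loop, trip count TC, tile size TS; rough
// sketch): the floor loop runs TC/TS iterations, plus one more if TC % TS != 0
// for the partial (epilogue) tile; the tile loop runs TS iterations, with the
// remainder TC % TS selected for the epilogue tile; and the original induction
// variable is rebuilt as FloorIV * TS + TileIV.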
1873
1874OpenMPIRBuilder::InsertPointTy
1875OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
1876 llvm::Value *BufSize, llvm::Value *CpyBuf,
1877 llvm::Value *CpyFn, llvm::Value *DidIt) {
1878 if (!updateToLocation(Loc))
1879 return Loc.IP;
1880
1881 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1882 Value *Ident = getOrCreateIdent(SrcLocStr);
1883 Value *ThreadId = getOrCreateThreadID(Ident);
1884
1885 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
1886
1887 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
1888
1889 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
1890 Builder.CreateCall(Fn, Args);
1891
1892 return Builder.saveIP();
1893}
1894
1895OpenMPIRBuilder::InsertPointTy
1896OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
1897 BodyGenCallbackTy BodyGenCB,
1898 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) {
1899
1900 if (!updateToLocation(Loc))
1901 return Loc.IP;
1902
1903 // If needed (i.e. not null), initialize `DidIt` with 0
1904 if (DidIt) {
1905 Builder.CreateStore(Builder.getInt32(0), DidIt);
1906 }
1907
1908 Directive OMPD = Directive::OMPD_single;
1909 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1910 Value *Ident = getOrCreateIdent(SrcLocStr);
1911 Value *ThreadId = getOrCreateThreadID(Ident);
1912 Value *Args[] = {Ident, ThreadId};
1913
1914 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
1915 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1916
1917 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
1918 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1919
1920 // generates the following:
1921 // if (__kmpc_single()) {
1922 // .... single region ...
1923 // __kmpc_end_single
1924 // }
1925
1926 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1927 /*Conditional*/ true, /*hasFinalize*/ true);
1928}
1929
1930OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
1931 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
1932 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
1933
1934 if (!updateToLocation(Loc))
1935 return Loc.IP;
1936
1937 Directive OMPD = Directive::OMPD_critical;
1938 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1939 Value *Ident = getOrCreateIdent(SrcLocStr);
1940 Value *ThreadId = getOrCreateThreadID(Ident);
1941 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
1942 Value *Args[] = {Ident, ThreadId, LockVar};
1943
1944 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
1945 Function *RTFn = nullptr;
1946 if (HintInst) {
1947 // Add Hint to entry Args and create call
1948 EnterArgs.push_back(HintInst);
1949 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
1950 } else {
1951 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
1952 }
1953 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
1954
1955 Function *ExitRTLFn =
1956 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
1957 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1958
1959 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1960 /*Conditional*/ false, /*hasFinalize*/ true);
1961}
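// Rough sketch of the emitted sequence (hint variant in parentheses):
//
//   __kmpc_critical(&loc, tid, &lock)   // or __kmpc_critical_with_hint(&loc, tid, &lock, hint)
//   ... critical region (BodyGenCB) ...
//   __kmpc_end_critical(&loc, tid, &lock)
//
// Unlike master/single, the region is not conditional: every thread enters it
// and is serialized by the lock.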
1962
1963OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
1964 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
1965 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
1966 bool HasFinalize, bool IsCancellable) {
1967
1968 if (HasFinalize)
1969 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
1970
1971 // Create inlined region's entry and body blocks, in preparation
1972 // for conditional creation
1973 BasicBlock *EntryBB = Builder.GetInsertBlock();
1974 Instruction *SplitPos = EntryBB->getTerminator();
1975 if (!isa_and_nonnull<BranchInst>(SplitPos))
1976 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
1977 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
1978 BasicBlock *FiniBB =
1979 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
1980
1981 Builder.SetInsertPoint(EntryBB->getTerminator());
1982 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
1983
1984 // generate body
1985 BodyGenCB(/* AllocaIP */ InsertPointTy(),
1986 /* CodeGenIP */ Builder.saveIP(), *FiniBB);
1987
1988 // If we didn't emit a branch to FiniBB during body generation, it means
1989 // FiniBB is unreachable (e.g. while(1);). Stop generating all the
1990 // unreachable blocks, and remove anything we are not going to use.
1991 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
1992 if (SkipEmittingRegion) {
1993 FiniBB->eraseFromParent();
1994 ExitCall->eraseFromParent();
1995 // Discard finalization if we have it.
1996 if (HasFinalize) {
1997 assert(!FinalizationStack.empty() &&
1998        "Unexpected finalization stack state!");
1999 FinalizationStack.pop_back();
2000 }
2001 } else {
2002 // emit exit call and do any needed finalization.
2003 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
2004 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
2005        FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
2006        "Unexpected control flow graph state!!");
2007 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
2008 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
2009        "Unexpected Control Flow State!");
2010 MergeBlockIntoPredecessor(FiniBB);
2011 }
2012
2013 // If we are skipping the region of a non-conditional directive, remove the exit
2014 // block and clear the builder's insertion point.
2015 assert(SplitPos->getParent() == ExitBB &&
2016        "Unexpected Insertion point location!");
2017 if (!Conditional && SkipEmittingRegion) {
2018 ExitBB->eraseFromParent();
2019 Builder.ClearInsertionPoint();
2020 } else {
2021 auto merged = MergeBlockIntoPredecessor(ExitBB);
2022 BasicBlock *ExitPredBB = SplitPos->getParent();
2023 auto InsertBB = merged ? ExitPredBB : ExitBB;
2024 if (!isa_and_nonnull<BranchInst>(SplitPos))
2025 SplitPos->eraseFromParent();
2026 Builder.SetInsertPoint(InsertBB);
2027 }
2028
2029 return Builder.saveIP();
2030}
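// Rough sketch of the block layout EmitOMPInlinedRegion produces for a
// conditional region (block names as created above and in
// emitCommonDirectiveEntry):
//
//   EntryBB:              %res = <EntryCall>
//                         br (%res != 0), omp_region.body, omp_region.end
//   omp_region.body:      filled in by BodyGenCB
//   omp_region.finalize:  FiniCB, then <ExitCall>; merged into its predecessor
//   omp_region.end:       continuation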
2031
2032OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
2033 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
2034 // If there is nothing to do, return the current insertion point.
2035 if (!Conditional || !EntryCall)
2036 return Builder.saveIP();
2037
2038 BasicBlock *EntryBB = Builder.GetInsertBlock();
2039 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
2040 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
2041 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
2042
2043 // Emit thenBB and set the Builder's insertion point there for
2044 // body generation next. Place the block after the current block.
2045 Function *CurFn = EntryBB->getParent();
2046 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);
2047
2048 // Move Entry branch to end of ThenBB, and replace with conditional
2049 // branch (If-stmt)
2050 Instruction *EntryBBTI = EntryBB->getTerminator();
2051 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
2052 EntryBBTI->removeFromParent();
2053 Builder.SetInsertPoint(UI);
2054 Builder.Insert(EntryBBTI);
2055 UI->eraseFromParent();
2056 Builder.SetInsertPoint(ThenBB->getTerminator());
2057
2058 // return an insertion point to ExitBB.
2059 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
2060}
2061
2062OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
2063 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
2064 bool HasFinalize) {
2065
2066 Builder.restoreIP(FinIP);
2067
2068 // If there is finalization to do, emit it before the exit call
2069 if (HasFinalize) {
2070 assert(!FinalizationStack.empty() &&
2071        "Unexpected finalization stack state!");
2072
2073 FinalizationInfo Fi = FinalizationStack.pop_back_val();
2074 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");
2075
2076 Fi.FiniCB(FinIP);
2077
2078 BasicBlock *FiniBB = FinIP.getBlock();
2079 Instruction *FiniBBTI = FiniBB->getTerminator();
2080
2081 // set Builder IP for call creation
2082 Builder.SetInsertPoint(FiniBBTI);
2083 }
2084
2085 if (!ExitCall)
2086 return Builder.saveIP();
2087
2088 // Place the exit call as the last instruction before the finalization block terminator.
2089 ExitCall->removeFromParent();
2090 Builder.Insert(ExitCall);
2091
2092 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
2093 ExitCall->getIterator());
2094}
2095
2096OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
2097 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
2098 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
2099 if (!IP.isSet())
2100 return IP;
2101
2102 IRBuilder<>::InsertPointGuard IPG(Builder);
2103
2104 // creates the following CFG structure
2105 // OMP_Entry : (MasterAddr != PrivateAddr)?
2106 // F T
2107 // | \
2108 // | copyin.not.master
2109 // | /
2110 // v /
2111 // copyin.not.master.end
2112 // |
2113 // v
2114 // OMP.Entry.Next
2115
2116 BasicBlock *OMP_Entry = IP.getBlock();
2117 Function *CurFn = OMP_Entry->getParent();
2118 BasicBlock *CopyBegin =
2119 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
2120 BasicBlock *CopyEnd = nullptr;
2121
2122 // If entry block is terminated, split to preserve the branch to following
2123 // basic block (i.e. OMP.Entry.Next), otherwise, leave everything as is.
2124 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
2125 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
2126 "copyin.not.master.end");
2127 OMP_Entry->getTerminator()->eraseFromParent();
2128 } else {
2129 CopyEnd =
2130 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
2131 }
2132
2133 Builder.SetInsertPoint(OMP_Entry);
2134 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
2135 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
2136 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
2137 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
2138
2139 Builder.SetInsertPoint(CopyBegin);
2140 if (BranchtoEnd)
2141 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
2142
2143 return Builder.saveIP();
2144}
2145
2146CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
2147 Value *Size, Value *Allocator,
2148 std::string Name) {
2149 IRBuilder<>::InsertPointGuard IPG(Builder);
2150 Builder.restoreIP(Loc.IP);
2151
2152 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2153 Value *Ident = getOrCreateIdent(SrcLocStr);
2154 Value *ThreadId = getOrCreateThreadID(Ident);
2155 Value *Args[] = {ThreadId, Size, Allocator};
2156
2157 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
2158
2159 return Builder.CreateCall(Fn, Args, Name);
2160}
2161
2162CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
2163 Value *Addr, Value *Allocator,
2164 std::string Name) {
2165 IRBuilder<>::InsertPointGuard IPG(Builder);
2166 Builder.restoreIP(Loc.IP);
2167
2168 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2169 Value *Ident = getOrCreateIdent(SrcLocStr);
2170 Value *ThreadId = getOrCreateThreadID(Ident);
2171 Value *Args[] = {ThreadId, Addr, Allocator};
2172 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
2173 return Builder.CreateCall(Fn, Args, Name);
2174}
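// Editor's note: a minimal caller-side sketch (assumed, not from the source)
// pairing the two helpers above; OMPBuilder, Loc, Size and Allocator are
// hypothetical values available at the call site.
//
//   CallInst *Buf = OMPBuilder.createOMPAlloc(Loc, Size, Allocator, "omp.buf");
//   // ... use the allocation ...
//   OMPBuilder.createOMPFree(Loc, Buf, Allocator, "");
//
// Both emit a call to the matching kmpc entry point (__kmpc_alloc /
// __kmpc_free) with the current thread id as the first argument.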
2175
2176CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
2177 const LocationDescription &Loc, llvm::Value *Pointer,
2178 llvm::ConstantInt *Size, const llvm::Twine &Name) {
2179 IRBuilder<>::InsertPointGuard IPG(Builder);
2180 Builder.restoreIP(Loc.IP);
2181
2182 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2183 Value *Ident = getOrCreateIdent(SrcLocStr);
2184 Value *ThreadId = getOrCreateThreadID(Ident);
2185 Constant *ThreadPrivateCache =
2186 getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
2187 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};
2188
2189 Function *Fn =
2190 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);
2191
2192 return Builder.CreateCall(Fn, Args);
2193}
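// Editor's note: illustrative only (names hypothetical). The helper above
// lowers a cached threadprivate access to roughly:
//
//   %tp = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* %loc, i32 %tid,
//             i8* %var, i64 <size>, i8*** @<Name>.cache)
//
// where @<Name>.cache is the internal global (of value type i8**) obtained
// through getOrCreateOMPInternalVariable(Int8PtrPtr, Name).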
2194
2195OpenMPIRBuilder::InsertPointTy
2196OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime) {
2197 if (!updateToLocation(Loc))
2198 return Loc.IP;
2199
2200 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2201 Value *Ident = getOrCreateIdent(SrcLocStr);
2202 ConstantInt *IsSPMDVal = ConstantInt::getBool(Int32->getContext(), IsSPMD);
2203 ConstantInt *UseGenericStateMachine =
2204 ConstantInt::getBool(Int32->getContext(), !IsSPMD);
2205 ConstantInt *RequiresFullRuntimeVal = ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
2206
2207 Function *Fn = getOrCreateRuntimeFunctionPtr(
2208 omp::RuntimeFunction::OMPRTL___kmpc_target_init);
2209
2210 CallInst *ThreadKind =
2211 Builder.CreateCall(Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal});
2212
2213 Value *ExecUserCode = Builder.CreateICmpEQ(
2214 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), "exec_user_code");
2215
2216 // ThreadKind = __kmpc_target_init(...)
2217 // if (ThreadKind == -1)
2218 // user_code
2219 // else
2220 // return;
2221
2222 auto *UI = Builder.CreateUnreachable();
2223 BasicBlock *CheckBB = UI->getParent();
2224 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry");
2225
2226 BasicBlock *WorkerExitBB = BasicBlock::Create(
2227 CheckBB->getContext(), "worker.exit", CheckBB->getParent());
2228 Builder.SetInsertPoint(WorkerExitBB);
2229 Builder.CreateRetVoid();
2230
2231 auto *CheckBBTI = CheckBB->getTerminator();
2232 Builder.SetInsertPoint(CheckBBTI);
2233 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB);
2234
2235 CheckBBTI->eraseFromParent();
2236 UI->eraseFromParent();
2237
2238 // Continue in the "user_code" block, see diagram above and in
2239 // openmp/libomptarget/deviceRTLs/common/include/target.h .
2240 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt());
2241}
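// Editor's note: a sketch (not from the source) of the control flow built by
// createTargetInit; block names match the strings used above, value names are
// hypothetical.
//
//   %threadkind = call i32 @__kmpc_target_init(%struct.ident_t* %loc,
//                     i1 <IsSPMD>, i1 <UseGenericStateMachine>,
//                     i1 <RequiresFullRuntime>)
//   %exec_user_code = icmp eq i32 %threadkind, -1
//   br i1 %exec_user_code, label %user_code.entry, label %worker.exit
//
//   worker.exit:
//     ret void
//   user_code.entry:
//     ; returned insert point -- the target region body is emitted here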
2242
2243void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc,
2244 bool IsSPMD, bool RequiresFullRuntime) {
2245 if (!updateToLocation(Loc))
2246 return;
2247
2248 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2249 Value *Ident = getOrCreateIdent(SrcLocStr);
2250 ConstantInt *IsSPMDVal = ConstantInt::getBool(Int32->getContext(), IsSPMD);
2251 ConstantInt *RequiresFullRuntimeVal = ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
2252
2253 Function *Fn = getOrCreateRuntimeFunctionPtr(
2254 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit);
2255
2256 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal});
2257}
2258
2259std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
2260 StringRef FirstSeparator,
2261 StringRef Separator) {
2262 SmallString<128> Buffer;
2263 llvm::raw_svector_ostream OS(Buffer);
2264 StringRef Sep = FirstSeparator;
2265 for (StringRef Part : Parts) {
2266 OS << Sep << Part;
2267 Sep = Separator;
2268 }
2269 return OS.str().str();
2270}
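// Editor's note: a worked example (assumed inputs) for the helper above:
//   getNameWithSeparators({"a", "b", "c"}, "__", ".")  returns  "__a.b.c"
// i.e. the first separator precedes the first part, and the regular separator
// joins the remaining parts.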
2271
2272Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
2273 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2274 // TODO: Replace the Twine arg with a StringRef to get rid of the conversion
2275 // logic. However, this is taken from the current implementation in clang as is.
2276 // Since this method is used in many places exclusively for OMP internal use,
2277 // we will keep it as is temporarily until we move all users to the
2278 // builder and then, if possible, fix it everywhere in one go.
2279 SmallString<256> Buffer;
2280 llvm::raw_svector_ostream Out(Buffer);
2281 Out << Name;
2282 StringRef RuntimeName = Out.str();
2283 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2284 if (Elem.second) {
2285 assert(Elem.second->getType()->getPointerElementType() == Ty &&
2286 "OMP internal variable has different type than requested");
2287 } else {
2288 // TODO: investigate the appropriate linkage type used for the global
2289 // variable for possibly changing that to internal or private, or maybe
2290 // create different versions of the function for different OMP internal
2291 // variables.
2292 Elem.second = new llvm::GlobalVariable(
2293 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage,
2294 llvm::Constant::getNullValue(Ty), Elem.first(),
2295 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
2296 AddressSpace);
2297 }
2298
2299 return Elem.second;
2300}
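// Editor's note: illustrative caching behaviour (hypothetical names):
//
//   Constant *A = getOrCreateOMPInternalVariable(Int8Ptr, ".kmpc.my.cache");
//   Constant *B = getOrCreateOMPInternalVariable(Int8Ptr, ".kmpc.my.cache");
//
// A == B: the second request hits InternalVars and returns the existing
// common-linkage, zero-initialized global; only a new name creates a new
// GlobalVariable (the assert above guards against a type mismatch).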
2301
2302Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
2303 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2304 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
2305 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
2306}
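// Editor's note: for example (assumed input), a critical region named "foo"
// resolves to the internal global ".gomp_critical_user_foo.var" of type
// KmpCriticalNameTy, shared by every use of that critical name in the module.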
2307
2308GlobalVariable *
2309OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
2310 std::string VarName) {
2311 llvm::Constant *MaptypesArrayInit =
2312 llvm::ConstantDataArray::get(M.getContext(), Mappings);
2313 auto *MaptypesArrayGlobal = new llvm::GlobalVariable(
2314 M, MaptypesArrayInit->getType(),
2315 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit,
2316 VarName);
2317 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2318 return MaptypesArrayGlobal;
2319}
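// Editor's note: a sketch (assumed inputs) of the global emitted above for
// Mappings = {0x20, 0x23} and VarName = ".offload_maptypes":
//
//   @.offload_maptypes = private unnamed_addr constant [2 x i64] [i64 32, i64 35]
//
// one i64 map-type flag word per mapped argument.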
2320
2321bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
2322 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
2323 assert(!(AO == AtomicOrdering::NotAtomic ||
2324 AO == llvm::AtomicOrdering::Unordered) &&
2325 "Unexpected Atomic Ordering.");
2326
2327 bool Flush = false;
2328 llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;
2329
2330 switch (AK) {
2331 case Read:
2332 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
2333 AO == AtomicOrdering::SequentiallyConsistent) {
2334 FlushAO = AtomicOrdering::Acquire;
2335 Flush = true;
2336 }
2337 break;
2338 case Write:
2339 case Update:
2340 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
2341 AO == AtomicOrdering::SequentiallyConsistent) {
2342 FlushAO = AtomicOrdering::Release;
2343 Flush = true;
2344 }
2345 break;
2346 case Capture:
2347 switch (AO) {
2348 case AtomicOrdering::Acquire:
2349 FlushAO = AtomicOrdering::Acquire;
2350 Flush = true;
2351 break;
2352 case AtomicOrdering::Release:
2353 FlushAO = AtomicOrdering::Release;
2354 Flush = true;
2355 break;
2356 case AtomicOrdering::AcquireRelease:
2357 case AtomicOrdering::SequentiallyConsistent:
2358 FlushAO = AtomicOrdering::AcquireRelease;
2359 Flush = true;
2360 break;
2361 default:
2362 // do nothing - leave silently.
2363 break;
2364 }
2365 }
2366
2367 if (Flush) {
2368 // The flush RT call does not yet take a memory ordering, so this already
2369 // resolves which atomic ordering would be used for when that support lands,
2370 // but still issues the plain flush call.
2371 // TODO: pass `FlushAO` once memory ordering support is added.
2372 (void)FlushAO;
2373 emitFlush(Loc);
2374 }
2375
2376 // For AO == AtomicOrdering::Monotonic and all other case combinations,
2377 // do nothing.
2378 return Flush;
2379}
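// Editor's note: a readability summary of the decision table encoded above
// (derived from the code, no new behaviour implied):
//
//   Read           : acquire / acq_rel / seq_cst  -> flush with acquire
//   Write, Update  : release / acq_rel / seq_cst  -> flush with release
//   Capture        : acquire -> acquire, release -> release,
//                    acq_rel / seq_cst -> acq_rel
//   anything else (e.g. monotonic)                -> no flush emitted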
2380
2381OpenMPIRBuilder::InsertPointTy
2382OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
2383 AtomicOpValue &X, AtomicOpValue &V,
2384 AtomicOrdering AO) {
2385 if (!updateToLocation(Loc))
2386 return Loc.IP;
2387
2388 Type *XTy = X.Var->getType();
2389 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
2390 Type *XElemTy = XTy->getPointerElementType();
2391 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2392 XElemTy->isPointerTy()) &&
2393 "OMP atomic read expected a scalar type");
2394
2395 Value *XRead = nullptr;
2396
2397 if (XElemTy->isIntegerTy()) {
2398 LoadInst *XLD =
2399 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
2400 XLD->setAtomic(AO);
2401 XRead = cast<Value>(XLD);
2402 } else {
2403 // We need to bitcast and perform the atomic op as an integer.
2404 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
2405 IntegerType *IntCastTy =
2406 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2407 Value *XBCast = Builder.CreateBitCast(
2408 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
2409 LoadInst *XLoad =
2410 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
2411 XLoad->setAtomic(AO);
2412 if (XElemTy->isFloatingPointTy()) {
2413 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
2414 } else {
2415 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
2416 }
2417 }
2418 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
2419 Builder.CreateStore(XRead, V.Var, V.IsVolatile);
2420 return Builder.saveIP();
2421}
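// Editor's note: an illustrative sketch (hypothetical names, acquire ordering
// assumed) of the non-integer path above for a 'float' X:
//
//   %src = bitcast float* %x to i32*                       ; atomic.src.int.cast
//   %ld  = load atomic i32, i32* %src acquire, align 4     ; omp.atomic.load
//   %val = bitcast i32 %ld to float                        ; atomic.flt.cast
//   store float %val, float* %v                            ; into V.Var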
2422
2423OpenMPIRBuilder::InsertPointTy
2424OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
2425 AtomicOpValue &X, Value *Expr,
2426 AtomicOrdering AO) {
2427 if (!updateToLocation(Loc))
2428 return Loc.IP;
2429
2430 Type *XTy = X.Var->getType();
2431 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
2432 Type *XElemTy = XTy->getPointerElementType();
2433 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2434 XElemTy->isPointerTy()) &&
2435 "OMP atomic write expected a scalar type");
2436
2437 if (XElemTy->isIntegerTy()) {
2438 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
2439 XSt->setAtomic(AO);
2440 } else {
2441 // We need to bitcast and perform the atomic op as an integer.
2442 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
2443 IntegerType *IntCastTy =
2444 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2445 Value *XBCast = Builder.CreateBitCast(
2446 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
2447 Value *ExprCast =
2448 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
2449 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
2450 XSt->setAtomic(AO);
2451 }
2452
2453 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
2454 return Builder.saveIP();
2455}
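// Editor's note: the matching write-side sketch (hypothetical names, release
// ordering assumed) for a 'float' X:
//
//   %dst = bitcast float* %x to i32*                       ; atomic.dst.int.cast
//   %val = bitcast float %expr to i32                      ; atomic.src.int.cast
//   store atomic i32 %val, i32* %dst release, align 4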
2456
2457OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
2458 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
2459 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2460 AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) {
2461 if (!updateToLocation(Loc))
2462 return Loc.IP;
2463
2464 LLVM_DEBUG({
2465 Type *XTy = X.Var->getType();
2466 assert(XTy->isPointerTy() &&
2467 "OMP Atomic expects a pointer to target memory");
2468 Type *XElemTy = XTy->getPointerElementType();
2469 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2470 XElemTy->isPointerTy()) &&
2471 "OMP atomic update expected a scalar type");
2472 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
2473 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&
2474 "OpenMP atomic does not support LT or GT operations");
2475 });
2476
2477 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
2478 IsXLHSInRHSPart);
2479 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
2480 return Builder.saveIP();
2481}
2482
2483Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
2484 AtomicRMWInst::BinOp RMWOp) {
2485 switch (RMWOp) {
2486 case AtomicRMWInst::Add:
2487 return Builder.CreateAdd(Src1, Src2);
2488 case AtomicRMWInst::Sub:
2489 return Builder.CreateSub(Src1, Src2);
2490 case AtomicRMWInst::And:
2491 return Builder.CreateAnd(Src1, Src2);
2492 case AtomicRMWInst::Nand:
2493 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2));
2494 case AtomicRMWInst::Or:
2495 return Builder.CreateOr(Src1, Src2);
2496 case AtomicRMWInst::Xor:
2497 return Builder.CreateXor(Src1, Src2);
2498 case AtomicRMWInst::Xchg:
2499 case AtomicRMWInst::FAdd:
2500 case AtomicRMWInst::FSub:
2501 case AtomicRMWInst::BAD_BINOP:
2502 case AtomicRMWInst::Max:
2503 case AtomicRMWInst::Min:
2504 case AtomicRMWInst::UMax:
2505 case AtomicRMWInst::UMin:
2506 llvm_unreachable("Unsupported atomic update operation");
2507 }
2508 llvm_unreachable("Unsupported atomic update operation");
2509}
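// Editor's note: usage sketch derived from the callers in this file. Given the
// value an atomicrmw fetched into Res.first, this recomputes the post-update
// value, e.g. for RMWOp == AtomicRMWInst::Add:
//
//   Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp); // old + expr
//
// createAtomicCapture then stores Res.first for a postfix capture and
// Res.second for a prefix capture.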
2510
2511std::pair<Value *, Value *>
2512OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
2513 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2514 AtomicUpdateCallbackTy &UpdateOp,
2515 bool VolatileX, bool IsXLHSInRHSPart) {
2516 Type *XElemTy = X->getType()->getPointerElementType();
2517
2518 bool DoCmpExch =
2519 ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
2520 (RMWOp == AtomicRMWInst::FSub) ||
2521 (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart);
2522
2523 std::pair<Value *, Value *> Res;
2524 if (XElemTy->isIntegerTy() && !DoCmpExch) {
2525 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
2526 // Not needed except in the case of postfix captures. Generate anyway for
2527 // consistency with the else branch; any DCE pass will remove it.
2528 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
2529 } else {
2530 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
2531 IntegerType *IntCastTy =
2532 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2533 Value *XBCast =
2534 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
2535 LoadInst *OldVal =
2536 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
2537 OldVal->setAtomic(AO);
2538 // CurBB
2539 // | /---\
2540 // ContBB |
2541 // | \---/
2542 // ExitBB
2543 BasicBlock *CurBB = Builder.GetInsertBlock();
2544 Instruction *CurBBTI = CurBB->getTerminator();
2545 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
2546 BasicBlock *ExitBB =
2547 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
2548 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
2549 X->getName() + ".atomic.cont");
2550 ContBB->getTerminator()->eraseFromParent();
2551 Builder.SetInsertPoint(ContBB);
2552 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
2553 PHI->addIncoming(OldVal, CurBB);
2554 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
2555 NewAtomicAddr->setName(X->getName() + "x.new.val");
2556 NewAtomicAddr->moveBefore(AllocIP);
2557 IntegerType *NewAtomicCastTy =
2558 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2559 bool IsIntTy = XElemTy->isIntegerTy();
2560 Value *NewAtomicIntAddr =
2561 (IsIntTy)
2562 ? NewAtomicAddr
2563 : Builder.CreateBitCast(NewAtomicAddr,
2564 NewAtomicCastTy->getPointerTo(Addrspace));
2565 Value *OldExprVal = PHI;
2566 if (!IsIntTy) {
2567 if (XElemTy->isFloatingPointTy()) {
2568 OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
2569 X->getName() + ".atomic.fltCast");
2570 } else {
2571 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
2572 X->getName() + ".atomic.ptrCast");
2573 }
2574 }
2575
2576 Value *Upd = UpdateOp(OldExprVal, Builder);
2577 Builder.CreateStore(Upd, NewAtomicAddr);
2578 LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr);
2579 Value *XAddr =
2580 (IsIntTy)
2581 ? X
2582 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
2583 AtomicOrdering Failure =
2584 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
2585 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
2586 XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure);
2587 Result->setVolatile(VolatileX);
2588 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
2589 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
2590 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
2591 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
2592
2593 Res.first = OldExprVal;
2594 Res.second = Upd;
2595
2596 // Set the insertion point in the exit block.
2597 if (UnreachableInst *ExitTI =
2598 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) {
2599 CurBBTI->eraseFromParent();
2600 Builder.SetInsertPoint(ExitBB);
2601 } else {
2602 Builder.SetInsertPoint(ExitTI);
2603 }
2604 }
2605
2606 return Res;
2607}
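// Editor's note: a sketch (hypothetical names) of the compare-exchange loop
// built above for a non-integer X, matching the CurBB/ContBB/ExitBB diagram:
//
//   entry:
//     %old  = load atomic i32, i32* %x.int <AO>             ; ".atomic.load"
//     br label %cont
//   cont:
//     %phi  = phi i32 [ %old, %entry ], [ %prev, %cont ]
//     ; UpdateOp runs on %phi (bitcast back to the element type); the result is
//     ; spilled through the ".new.val" alloca and reloaded as i32 (%desired)
//     %pair = cmpxchg i32* %x.int, i32 %phi, i32 %desired <AO> <failure>
//     %prev = extractvalue { i32, i1 } %pair, 0
//     %ok   = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %exit, label %cont
//   exit:                                                    ; ".atomic.exit"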
2608
2609OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
2610 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
2611 AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
2612 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
2613 bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) {
2614 if (!updateToLocation(Loc))
2615 return Loc.IP;
2616
2617 LLVM_DEBUG({
2618 Type *XTy = X.Var->getType();
2619 assert(XTy->isPointerTy() &&
2620 "OMP Atomic expects a pointer to target memory");
2621 Type *XElemTy = XTy->getPointerElementType();
2622 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2623 XElemTy->isPointerTy()) &&
2624 "OMP atomic capture expected a scalar type");
2625 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
2626 "OpenMP atomic does not support LT or GT operations");
2627 });
2628
2629 // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
2630 // 'x' is simply atomically rewritten with 'expr'.
2631 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
2632 std::pair<Value *, Value *> Result =
2633 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp,
2634 X.IsVolatile, IsXLHSInRHSPart);
2635
2636 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
2637 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);
2638
2639 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
2640 return Builder.saveIP();
2641}
2642
2643GlobalVariable *
2644OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
2645 std::string VarName) {
2646 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
2647 llvm::ArrayType::get(
2648 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
2649 Names);
2650 auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
2651 M, MapNamesArrayInit->getType(),
2652 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
2653 VarName);
2654 return MapNamesArrayGlobal;
2655}
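// --- Illustrative sketch (not part of the listed source): how a front end
// might call createOffloadMapnames. The helper name, the map-name strings and
// the pre-existing OMPBuilder/Builder/M objects are assumptions for the example.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/IRBuilder.h"

llvm::GlobalVariable *emitMapNames(llvm::OpenMPIRBuilder &OMPBuilder,
                                   llvm::IRBuilderBase &Builder,
                                   llvm::Module &M) {
  llvm::SmallVector<llvm::Constant *, 2> Names;
  // Each element is an i8* constant pointing at a null-terminated map name.
  Names.push_back(Builder.CreateGlobalStringPtr("a[0:n]", "", 0, &M));
  Names.push_back(Builder.CreateGlobalStringPtr("b[0:n]", "", 0, &M));
  // Emits the private constant array global built by the function above.
  return OMPBuilder.createOffloadMapnames(Names, ".offload_mapnames");
}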
2656
2657// Create all simple and struct types exposed by the runtime and remember
2658// the llvm::PointerTypes of them for easy access later.
2659void OpenMPIRBuilder::initializeTypes(Module &M) {
2660 LLVMContext &Ctx = M.getContext();
2661 StructType *T;
2662#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
2663#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
2664 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \
2665 VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
2666#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
2667 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \
2668 VarName##Ptr = PointerType::getUnqual(VarName);
2669#define OMP_STRUCT_TYPE(VarName, StructName, ...) \
2670 T = StructType::getTypeByName(Ctx, StructName); \
2671 if (!T) \
2672 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \
2673 VarName = T; \
2674 VarName##Ptr = PointerType::getUnqual(T);
2675#include "llvm/Frontend/OpenMP/OMPKinds.def"
2676}
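// --- Illustrative expansion (not part of the listed source): assuming
// OMPKinds.def contains an entry along the lines of
//   OMP_STRUCT_TYPE(Ident, "struct.ident_t", Int32, Int32, Int32, Int32, Int8Ptr)
// the #include above would generate code equivalent to the following inside
// initializeTypes (the field types named here are assumptions for the example):
//   T = StructType::getTypeByName(Ctx, "struct.ident_t");
//   if (!T)
//     T = StructType::create(Ctx, {Int32, Int32, Int32, Int32, Int8Ptr},
//                            "struct.ident_t");
//   Ident = T;
//   IdentPtr = PointerType::getUnqual(T);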
2677
2678void OpenMPIRBuilder::OutlineInfo::collectBlocks(
2679 SmallPtrSetImpl<BasicBlock *> &BlockSet,
2680 SmallVectorImpl<BasicBlock *> &BlockVector) {
2681 SmallVector<BasicBlock *, 32> Worklist;
2682 BlockSet.insert(EntryBB);
2683 BlockSet.insert(ExitBB);
2684
2685 Worklist.push_back(EntryBB);
2686 while (!Worklist.empty()) {
2687 BasicBlock *BB = Worklist.pop_back_val();
2688 BlockVector.push_back(BB);
2689 for (BasicBlock *SuccBB : successors(BB))
2690 if (BlockSet.insert(SuccBB).second)
2691 Worklist.push_back(SuccBB);
2692 }
2693}
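// --- Illustrative sketch (not part of the listed source): collecting all
// blocks of an outlined region via OutlineInfo. The helper name and the
// RegionEntry/RegionExit blocks are assumptions for the example.
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"

void collectRegionBlocks(llvm::BasicBlock *RegionEntry,
                         llvm::BasicBlock *RegionExit) {
  llvm::OpenMPIRBuilder::OutlineInfo OI;
  OI.EntryBB = RegionEntry;
  OI.ExitBB = RegionExit;
  llvm::SmallPtrSet<llvm::BasicBlock *, 8> BlockSet;
  llvm::SmallVector<llvm::BasicBlock *, 8> BlockVector;
  // BlockVector receives the region's blocks in the worklist order shown
  // above; BlockSet additionally contains ExitBB even though it is not visited.
  OI.collectBlocks(BlockSet, BlockVector);
}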
2694
2695void CanonicalLoopInfo::collectControlBlocks(
2696 SmallVectorImpl<BasicBlock *> &BBs) {
2697 // We only count those BBs as control block for which we do not need to
2698 // reverse the CFG, i.e. not the loop body which can contain arbitrary control
2699 // flow. For consistency, this also means we do not add the Body block, which
2700 // is just the entry to the body code.
2701 BBs.reserve(BBs.size() + 6);
2702 BBs.append({Preheader, Header, Cond, Latch, Exit, After});
2703}
2704
2705void CanonicalLoopInfo::assertOK() const {
2706#ifndef NDEBUG
2707 if (!IsValid)
2708 return;
2709
2710 // Verify standard control-flow we use for OpenMP loops.
2711 assert(Preheader);
2712 assert(isa<BranchInst>(Preheader->getTerminator()) &&
2713        "Preheader must terminate with unconditional branch");
2714 assert(Preheader->getSingleSuccessor() == Header &&
2715        "Preheader must jump to header");
2716
2717 assert(Header);
2718 assert(isa<BranchInst>(Header->getTerminator()) &&
2719        "Header must terminate with unconditional branch");
2720 assert(Header->getSingleSuccessor() == Cond &&
2721        "Header must jump to exiting block");
2722
2723 assert(Cond);
2724 assert(Cond->getSinglePredecessor() == Header &&
2725        "Exiting block only reachable from header");
2726
2727 assert(isa<BranchInst>(Cond->getTerminator()) &&
2728        "Exiting block must terminate with conditional branch");
2729 assert(size(successors(Cond)) == 2 &&
2730        "Exiting block must have two successors");
2731 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&
2732        "Exiting block's first successor jump to the body");
2733 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&
2734        "Exiting block's second successor must exit the loop");
2735
2736 assert(Body);
2737 assert(Body->getSinglePredecessor() == Cond &&
2738        "Body only reachable from exiting block");
2739 assert(!isa<PHINode>(Body->front()));
2740
2741 assert(Latch);
2742 assert(isa<BranchInst>(Latch->getTerminator()) &&
2743        "Latch must terminate with unconditional branch");
2744 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header");
2745 // TODO: To support simple redirecting of the end of the body code that has
2746 // multiple; introduce another auxiliary basic block like preheader and after.
2747 assert(Latch->getSinglePredecessor() != nullptr);
2748 assert(!isa<PHINode>(Latch->front()));
2749
2750 assert(Exit);
2751 assert(isa<BranchInst>(Exit->getTerminator()) &&
2752        "Exit block must terminate with unconditional branch");
2753 assert(Exit->getSingleSuccessor() == After &&
2754        "Exit block must jump to after block");
2755
2756 assert(After);
2757 assert(After->getSinglePredecessor() == Exit &&
2758        "After block only reachable from exit block");
2759 assert(After->empty() || !isa<PHINode>(After->front()));
2760
2761 Instruction *IndVar = getIndVar();
2762 assert(IndVar && "Canonical induction variable not found?");
2763 assert(isa<IntegerType>(IndVar->getType()) &&
2764        "Induction variable must be an integer");
2765 assert(cast<PHINode>(IndVar)->getParent() == Header &&
2766        "Induction variable must be a PHI in the loop header");
2767 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader);
2768 assert(
2769     cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero());
2770 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch);
2771
2772 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
2773 assert(cast<Instruction>(NextIndVar)->getParent() == Latch);
2774 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add);
2775 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar);
2776 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))
2777            ->isOne());
2778
2779 Value *TripCount = getTripCount();
2780 assert(TripCount && "Loop trip count not found?");
2781 assert(IndVar->getType() == TripCount->getType() &&
2782        "Trip count and induction variable must have the same type");
2783
2784 auto *CmpI = cast<CmpInst>(&Cond->front());
2785 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&
2786        "Exit condition must be a signed less-than comparison");
2787 assert(CmpI->getOperand(0) == IndVar &&
2788        "Exit condition must compare the induction variable");
2789 assert(CmpI->getOperand(1) == TripCount &&
2790        "Exit condition must compare with the trip count");
2791#endif
2792}
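// --- Illustrative sketch (not part of the listed source): the block skeleton
// that assertOK() checks for, built with plain IRBuilder calls. The function
// and block names, the i32 induction variable and the constant trip count of
// 10 are assumptions for the example.
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"

void buildCanonicalLoopSkeleton(llvm::Function &F) {
  using namespace llvm;
  LLVMContext &Ctx = F.getContext();
  auto *Preheader = BasicBlock::Create(Ctx, "preheader", &F);
  auto *Header = BasicBlock::Create(Ctx, "header", &F);
  auto *Cond = BasicBlock::Create(Ctx, "cond", &F);
  auto *Body = BasicBlock::Create(Ctx, "body", &F);
  auto *Latch = BasicBlock::Create(Ctx, "latch", &F);
  auto *Exit = BasicBlock::Create(Ctx, "exit", &F);
  auto *After = BasicBlock::Create(Ctx, "after", &F);

  IRBuilder<> B(Ctx);
  B.SetInsertPoint(Preheader);
  B.CreateBr(Header);                               // preheader -> header
  B.SetInsertPoint(Header);
  PHINode *IV = B.CreatePHI(B.getInt32Ty(), 2, "iv");
  B.CreateBr(Cond);                                 // header -> cond
  B.SetInsertPoint(Cond);
  Value *Cmp = B.CreateICmpULT(IV, B.getInt32(10), "cmp");
  B.CreateCondBr(Cmp, Body, Exit);                  // cond -> {body, exit}
  B.SetInsertPoint(Body);
  B.CreateBr(Latch);                                // body -> latch
  B.SetInsertPoint(Latch);
  Value *Next = B.CreateAdd(IV, B.getInt32(1), "iv.next");
  B.CreateBr(Header);                               // latch -> header (back edge)
  B.SetInsertPoint(Exit);
  B.CreateBr(After);                                // exit -> after
  B.SetInsertPoint(After);
  B.CreateRetVoid();
  IV->addIncoming(B.getInt32(0), Preheader);        // canonical IV starts at zero
  IV->addIncoming(Next, Latch);                     // and is incremented by one
}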

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
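// --- Illustrative sketch (not part of the listed source, assumes this header
// is included): counting every instruction a builder inserts by plugging in a
// callback inserter. The helper name and counter are assumptions.
unsigned countInsertions(llvm::LLVMContext &Ctx) {
  unsigned NumInserted = 0;
  llvm::IRBuilder<llvm::ConstantFolder, llvm::IRBuilderCallbackInserter> Builder(
      Ctx, llvm::ConstantFolder(),
      llvm::IRBuilderCallbackInserter(
          [&](llvm::Instruction *I) { ++NumInserted; }));
  // Every subsequent Create* call that actually inserts an instruction also
  // runs the callback and bumps NumInserted.
  return NumInserted;
}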
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update an entry (Kind, MD) in MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V));
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
  33. Null pointer value stored to field 'BB'
175 InsertPt = BasicBlock::iterator();
176 }
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MetadataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
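// --- Illustrative sketch (not part of the listed source): propagating !dbg
// from an existing instruction to everything the builder creates next. The
// helper name, Builder and Src are assumptions for the example.
void copyDebugLocFrom(llvm::IRBuilderBase &Builder, llvm::Instruction *Src) {
  Builder.CollectMetadataToCopy(Src, {llvm::LLVMContext::MD_dbg});
  // Subsequent Create* calls attach Src's !dbg metadata automatically.
}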
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
  31. Taking false branch
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
  32. Calling 'IRBuilderBase::ClearInsertionPoint'
  34. Returning from 'IRBuilderBase::ClearInsertionPoint'
288 }
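// --- Illustrative sketch (not part of the listed source): the path the
// analyzer notes above describe. Restoring an unset InsertPoint falls through
// to ClearInsertionPoint(), which stores nullptr to the BB field. The helper
// name and standalone context are assumptions for the example.
void restoreUnsetInsertPoint(llvm::LLVMContext &Ctx) {
  llvm::IRBuilder<> Builder(Ctx);
  llvm::IRBuilderBase::InsertPoint Unset;  // default-constructed, isSet() == false
  Builder.restoreIP(Unset);                // takes the ClearInsertionPoint() branch
  llvm::BasicBlock *BB = Builder.GetInsertBlock();
  // BB is nullptr here; any later code that dereferences the stored block
  // without checking is what the analyzer reports as a call on a null
  // C++ object pointer.
  (void)BB;
}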
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!"
) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/IRBuilder.h"
, 345, __extension__ __PRETTY_FUNCTION__))
;
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
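// --- Illustrative sketch (not part of the listed source): RAII save/restore
// of the insertion point. The helper name and TmpBB are assumptions for the
// example.
void emitIntoTemporaryBlock(llvm::IRBuilderBase &Builder,
                            llvm::BasicBlock *TmpBB) {
  llvm::IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(TmpBB);
  // ... create instructions at the end of TmpBB ...
} // Guard's destructor restores the previous insertion point and debug location.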
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is taken from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
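// --- Illustrative sketch (not part of the listed source): emitting the string
// global. Builder is assumed to have a valid insertion point (alternatively a
// module can be passed explicitly via the last parameter).
//   GlobalVariable *Str = Builder.CreateGlobalString("hello\n", ".str");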
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
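// --- Illustrative sketch (not part of the listed source): the constant
// helpers in action; Builder is assumed.
//   ConstantInt *True = Builder.getInt1(true);
//   ConstantInt *FortyTwo = Builder.getInt32(42);
//   ConstantInt *Wide = Builder.getIntN(128, 7);         // i128 7
//   ConstantInt *FromAP = Builder.getInt(APInt(64, 42)); // from an explicit APInt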
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
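// --- Illustrative sketch (not part of the listed source): zero-filling 64
// bytes at Dst with 16-byte alignment; Builder and Dst are assumptions.
//   CallInst *MS = Builder.CreateMemSet(Dst, Builder.getInt8(0), /*Size=*/64,
//                                       MaybeAlign(16));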
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
649
650 CallInst *
651 CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
652 MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
653 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
654 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
655
656 /// Create and insert an element unordered-atomic memcpy between the
657 /// specified pointers.
658 ///
659 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
660 ///
661 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
662 /// specified, it will be added to the instruction. Likewise with alias.scope
663 /// and noalias tags.
664 CallInst *CreateElementUnorderedAtomicMemCpy(
665 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
666 uint32_t ElementSize, MDNode *TBAATag = nullptr,
667 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
668 MDNode *NoAliasTag = nullptr);
669
670 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
671 MaybeAlign SrcAlign, uint64_t Size,
672 bool isVolatile = false, MDNode *TBAATag = nullptr,
673 MDNode *ScopeTag = nullptr,
674 MDNode *NoAliasTag = nullptr) {
675 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
676 isVolatile, TBAATag, ScopeTag, NoAliasTag);
677 }
678
679 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
680 MaybeAlign SrcAlign, Value *Size,
681 bool isVolatile = false, MDNode *TBAATag = nullptr,
682 MDNode *ScopeTag = nullptr,
683 MDNode *NoAliasTag = nullptr);
684
685 /// \brief Create and insert an element unordered-atomic memmove between the
686 /// specified pointers.
687 ///
688 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
689 /// respectively.
690 ///
691 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
692 /// specified, it will be added to the instruction. Likewise with alias.scope
693 /// and noalias tags.
694 CallInst *CreateElementUnorderedAtomicMemMove(
695 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
696 uint32_t ElementSize, MDNode *TBAATag = nullptr,
697 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
698 MDNode *NoAliasTag = nullptr);
699
700 /// Create a vector fadd reduction intrinsic of the source vector.
701 /// The first parameter is a scalar accumulator value for ordered reductions.
702 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
703
704 /// Create a vector fmul reduction intrinsic of the source vector.
705 /// The first parameter is a scalar accumulator value for ordered reductions.
706 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
707
708 /// Create a vector int add reduction intrinsic of the source vector.
709 CallInst *CreateAddReduce(Value *Src);
710
711 /// Create a vector int mul reduction intrinsic of the source vector.
712 CallInst *CreateMulReduce(Value *Src);
713
714 /// Create a vector int AND reduction intrinsic of the source vector.
715 CallInst *CreateAndReduce(Value *Src);
716
717 /// Create a vector int OR reduction intrinsic of the source vector.
718 CallInst *CreateOrReduce(Value *Src);
719
720 /// Create a vector int XOR reduction intrinsic of the source vector.
721 CallInst *CreateXorReduce(Value *Src);
722
723 /// Create a vector integer max reduction intrinsic of the source
724 /// vector.
725 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
726
727 /// Create a vector integer min reduction intrinsic of the source
728 /// vector.
729 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
730
731 /// Create a vector float max reduction intrinsic of the source
732 /// vector.
733 CallInst *CreateFPMaxReduce(Value *Src);
734
735 /// Create a vector float min reduction intrinsic of the source
736 /// vector.
737 CallInst *CreateFPMinReduce(Value *Src);
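A small sketch of the reduction helpers (editorial addition), assuming an IRBuilder<> "B", a <4 x i32> value "IVec" and a <4 x float> value "FVec":

  // Horizontal integer add: returns an i32.
  llvm::Value *Sum = B.CreateAddReduce(IVec);
  // Ordered float add: the first operand is the scalar accumulator/start value.
  llvm::Value *FSum =
      B.CreateFAddReduce(llvm::ConstantFP::get(B.getFloatTy(), 0.0), FVec);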
738
739 /// Create a lifetime.start intrinsic.
740 ///
741 /// If the pointer isn't i8* it will be converted.
742 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
743
744 /// Create a lifetime.end intrinsic.
745 ///
746 /// If the pointer isn't i8* it will be converted.
747 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
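A usage sketch (editorial addition), assuming an IRBuilder<> "B" and an AllocaInst "Slot" known to cover 64 bytes:

  // Mark the 64 bytes behind Slot as live, then dead again.
  B.CreateLifetimeStart(Slot, B.getInt64(64));
  // ... code that uses Slot ...
  B.CreateLifetimeEnd(Slot, B.getInt64(64));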
748
749 /// Create a call to invariant.start intrinsic.
750 ///
751 /// If the pointer isn't i8* it will be converted.
752 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
753
754 /// Create a call to Masked Load intrinsic
755 CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
756 Value *PassThru = nullptr, const Twine &Name = "");
757
758 /// Create a call to Masked Store intrinsic
759 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
760 Value *Mask);
761
762 /// Create a call to Masked Gather intrinsic
763 CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
764 Value *Mask = nullptr, Value *PassThru = nullptr,
765 const Twine &Name = "");
766
767 /// Create a call to Masked Scatter intrinsic
768 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
769 Value *Mask = nullptr);
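A sketch of the masked memory helpers (editorial addition), assuming an IRBuilder<> "B", a <4 x i32>* value "Ptr", a <4 x i1> mask "Mask" and a <4 x i32> pass-through value "Passthru":

  llvm::Type *VecTy = llvm::FixedVectorType::get(B.getInt32Ty(), 4);
  // Lanes where Mask is false read Passthru instead of memory.
  llvm::Value *V = B.CreateMaskedLoad(VecTy, Ptr, llvm::Align(16), Mask, Passthru);
  // Lanes where Mask is false are not written.
  B.CreateMaskedStore(V, Ptr, llvm::Align(16), Mask);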
770
771 /// Create an assume intrinsic call that allows the optimizer to
772 /// assume that the provided condition will be true.
773 ///
774 /// The optional argument \p OpBundles specifies operand bundles that are
775 /// added to the call instruction.
776 CallInst *CreateAssumption(Value *Cond,
777 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
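A short sketch (editorial addition), assuming an IRBuilder<> "B" and an i32 value "X": telling the optimizer that X is strictly positive.

  B.CreateAssumption(B.CreateICmpSGT(X, B.getInt32(0)));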
778
779 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
780 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
781 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
782 return CreateNoAliasScopeDeclaration(
783 MetadataAsValue::get(Context, ScopeTag));
784 }
785
786 /// Create a call to the experimental.gc.statepoint intrinsic to
787 /// start a new statepoint sequence.
788 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
789 Value *ActualCallee,
790 ArrayRef<Value *> CallArgs,
791 Optional<ArrayRef<Value *>> DeoptArgs,
792 ArrayRef<Value *> GCArgs,
793 const Twine &Name = "");
794
795 /// Create a call to the experimental.gc.statepoint intrinsic to
796 /// start a new statepoint sequence.
797 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
798 Value *ActualCallee, uint32_t Flags,
799 ArrayRef<Value *> CallArgs,
800 Optional<ArrayRef<Use>> TransitionArgs,
801 Optional<ArrayRef<Use>> DeoptArgs,
802 ArrayRef<Value *> GCArgs,
803 const Twine &Name = "");
804
805 /// Convenience function for the common case when CallArgs are filled
806 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
807 /// .get()'ed to get the Value pointer.
808 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
809 Value *ActualCallee, ArrayRef<Use> CallArgs,
810 Optional<ArrayRef<Value *>> DeoptArgs,
811 ArrayRef<Value *> GCArgs,
812 const Twine &Name = "");
813
814 /// Create an invoke to the experimental.gc.statepoint intrinsic to
815 /// start a new statepoint sequence.
816 InvokeInst *
817 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
818 Value *ActualInvokee, BasicBlock *NormalDest,
819 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
820 Optional<ArrayRef<Value *>> DeoptArgs,
821 ArrayRef<Value *> GCArgs, const Twine &Name = "");
822
823 /// Create an invoke to the experimental.gc.statepoint intrinsic to
824 /// start a new statepoint sequence.
825 InvokeInst *CreateGCStatepointInvoke(
826 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
827 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
828 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
829 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
830 const Twine &Name = "");
831
832 // Convenience function for the common case when CallArgs are filled in using
833 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
834 // get the Value *.
835 InvokeInst *
836 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
837 Value *ActualInvokee, BasicBlock *NormalDest,
838 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
839 Optional<ArrayRef<Value *>> DeoptArgs,
840 ArrayRef<Value *> GCArgs, const Twine &Name = "");
841
842 /// Create a call to the experimental.gc.result intrinsic to extract
843 /// the result from a call wrapped in a statepoint.
844 CallInst *CreateGCResult(Instruction *Statepoint,
845 Type *ResultType,
846 const Twine &Name = "");
847
848 /// Create a call to the experimental.gc.relocate intrinsics to
849 /// project the relocated value of one pointer from the statepoint.
850 CallInst *CreateGCRelocate(Instruction *Statepoint,
851 int BaseOffset,
852 int DerivedOffset,
853 Type *ResultType,
854 const Twine &Name = "");
855
856 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
857 /// base pointer for the specified derived pointer.
858 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
859
860 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
861 /// the offset of the specified derived pointer from its base.
862 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
863
864 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
865 /// will be the same type as that of \p Scaling.
866 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
867
868 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
869 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
870
871 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
872 /// type.
873 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
874 Instruction *FMFSource = nullptr,
875 const Twine &Name = "");
876
877 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
878 /// first type.
879 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
880 Instruction *FMFSource = nullptr,
881 const Twine &Name = "");
882
883 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
884 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
885 /// the intrinsic.
886 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
887 ArrayRef<Value *> Args,
888 Instruction *FMFSource = nullptr,
889 const Twine &Name = "");
890
891 /// Create call to the minnum intrinsic.
892 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
893 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
894 }
895
896 /// Create call to the maxnum intrinsic.
897 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
898 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
899 }
900
901 /// Create call to the minimum intrinsic.
902 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
903 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
904 }
905
906 /// Create call to the maximum intrinsic.
907 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
908 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
909 }
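A usage sketch for the float min/max wrappers above (editorial addition), assuming an IRBuilder<> "B" and two float values "X" and "Y":

  // llvm.minnum ignores a NaN operand where possible; llvm.minimum propagates it.
  llvm::Value *M0 = B.CreateMinNum(X, Y, "minnum");
  llvm::Value *M1 = B.CreateMinimum(X, Y, "minimum");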
910
911 /// Create a call to the arithmetic_fence intrinsic.
912 CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
913 const Twine &Name = "") {
914 return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
915 Name);
916 }
917
918 /// Create a call to the experimental.vector.extract intrinsic.
919 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
920 const Twine &Name = "") {
921 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
922 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
923 Name);
924 }
925
926 /// Create a call to the experimental.vector.insert intrinsic.
927 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
928 Value *Idx, const Twine &Name = "") {
929 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
930 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
931 nullptr, Name);
932 }
933
934private:
935 /// Create a call to a masked intrinsic with given Id.
936 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
937 ArrayRef<Type *> OverloadedTypes,
938 const Twine &Name = "");
939
940 Value *getCastedInt8PtrValue(Value *Ptr);
941
942 //===--------------------------------------------------------------------===//
943 // Instruction creation methods: Terminators
944 //===--------------------------------------------------------------------===//
945
946private:
947 /// Helper to add branch weight and unpredictable metadata onto an
948 /// instruction.
949 /// \returns The annotated instruction.
950 template <typename InstTy>
951 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
952 if (Weights)
953 I->setMetadata(LLVMContext::MD_prof, Weights);
954 if (Unpredictable)
955 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
956 return I;
957 }
958
959public:
960 /// Create a 'ret void' instruction.
961 ReturnInst *CreateRetVoid() {
962 return Insert(ReturnInst::Create(Context));
963 }
964
965 /// Create a 'ret <val>' instruction.
966 ReturnInst *CreateRet(Value *V) {
967 return Insert(ReturnInst::Create(Context, V));
968 }
969
970 /// Create a sequence of N insertvalue instructions,
971 /// with one Value from the retVals array each, that build an aggregate
972 /// return value one value at a time, and a ret instruction to return
973 /// the resulting aggregate value.
974 ///
975 /// This is a convenience function for code that uses aggregate return values
976 /// as a vehicle for having multiple return values.
977 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
978 Value *V = UndefValue::get(getCurrentFunctionReturnType());
979 for (unsigned i = 0; i != N; ++i)
980 V = CreateInsertValue(V, retVals[i], i, "mrv");
981 return Insert(ReturnInst::Create(Context, V));
982 }
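A sketch (editorial addition) for a function whose return type is the struct { i32, i32 }, assuming an IRBuilder<> "B" positioned in that function and two i32 values "A" and "C":

  llvm::Value *Parts[] = {A, C};
  // Builds two insertvalues into an undef {i32, i32} and returns the result.
  B.CreateAggregateRet(Parts, 2);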
983
984 /// Create an unconditional 'br label X' instruction.
985 BranchInst *CreateBr(BasicBlock *Dest) {
986 return Insert(BranchInst::Create(Dest));
987 }
988
989 /// Create a conditional 'br Cond, TrueDest, FalseDest'
990 /// instruction.
991 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
992 MDNode *BranchWeights = nullptr,
993 MDNode *Unpredictable = nullptr) {
994 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
995 BranchWeights, Unpredictable));
996 }
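A hedged sketch of attaching branch weights (editorial addition), assuming an IRBuilder<> "B", a condition "Cond" and blocks "ThenBB"/"ElseBB"; MDBuilder comes from llvm/IR/MDBuilder.h:

  llvm::MDBuilder MDB(B.getContext());
  // Roughly 95% taken / 5% not taken, attached as !prof metadata.
  B.CreateCondBr(Cond, ThenBB, ElseBB, MDB.createBranchWeights(95, 5));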
997
998 /// Create a conditional 'br Cond, TrueDest, FalseDest'
999 /// instruction. Copy branch metadata if available.
1000 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1001 Instruction *MDSrc) {
1002 BranchInst *Br = BranchInst::Create(True, False, Cond);
1003 if (MDSrc) {
1004 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1005 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1006 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1007 }
1008 return Insert(Br);
1009 }
1010
1011 /// Create a switch instruction with the specified value, default dest,
1012 /// and with a hint for the number of cases that will be added (for efficient
1013 /// allocation).
1014 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1015 MDNode *BranchWeights = nullptr,
1016 MDNode *Unpredictable = nullptr) {
1017 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1018 BranchWeights, Unpredictable));
1019 }
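A usage sketch (editorial addition), assuming an IRBuilder<> "B", an i32 value "V" and blocks "DefaultBB", "Case0BB", "Case1BB":

  llvm::SwitchInst *SI = B.CreateSwitch(V, DefaultBB, /*NumCases=*/2);
  SI->addCase(B.getInt32(0), Case0BB);
  SI->addCase(B.getInt32(1), Case1BB);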
1020
1021 /// Create an indirect branch instruction with the specified address
1022 /// operand, with an optional hint for the number of destinations that will be
1023 /// added (for efficient allocation).
1024 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1025 return Insert(IndirectBrInst::Create(Addr, NumDests));
1026 }
1027
1028 /// Create an invoke instruction.
1029 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1030 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1031 ArrayRef<Value *> Args,
1032 ArrayRef<OperandBundleDef> OpBundles,
1033 const Twine &Name = "") {
1034 InvokeInst *II =
1035 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1036 if (IsFPConstrained)
1037 setConstrainedFPCallAttr(II);
1038 return Insert(II, Name);
1039 }
1040 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1041 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1042 ArrayRef<Value *> Args = None,
1043 const Twine &Name = "") {
1044 InvokeInst *II =
1045 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1046 if (IsFPConstrained)
1047 setConstrainedFPCallAttr(II);
1048 return Insert(II, Name);
1049 }
1050
1051 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1052 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1053 ArrayRef<OperandBundleDef> OpBundles,
1054 const Twine &Name = "") {
1055 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1056 NormalDest, UnwindDest, Args, OpBundles, Name);
1057 }
1058
1059 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1060 BasicBlock *UnwindDest,
1061 ArrayRef<Value *> Args = None,
1062 const Twine &Name = "") {
1063 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1064 NormalDest, UnwindDest, Args, Name);
1065 }
1066
1067 /// \brief Create a callbr instruction.
1068 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1069 BasicBlock *DefaultDest,
1070 ArrayRef<BasicBlock *> IndirectDests,
1071 ArrayRef<Value *> Args = None,
1072 const Twine &Name = "") {
1073 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1074 Args), Name);
1075 }
1076 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1077 BasicBlock *DefaultDest,
1078 ArrayRef<BasicBlock *> IndirectDests,
1079 ArrayRef<Value *> Args,
1080 ArrayRef<OperandBundleDef> OpBundles,
1081 const Twine &Name = "") {
1082 return Insert(
1083 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1084 OpBundles), Name);
1085 }
1086
1087 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1088 ArrayRef<BasicBlock *> IndirectDests,
1089 ArrayRef<Value *> Args = None,
1090 const Twine &Name = "") {
1091 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1092 DefaultDest, IndirectDests, Args, Name);
1093 }
1094 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1095 ArrayRef<BasicBlock *> IndirectDests,
1096 ArrayRef<Value *> Args,
1097 ArrayRef<OperandBundleDef> OpBundles,
1098 const Twine &Name = "") {
1099 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1100 DefaultDest, IndirectDests, Args, Name);
1101 }
1102
1103 ResumeInst *CreateResume(Value *Exn) {
1104 return Insert(ResumeInst::Create(Exn));
1105 }
1106
1107 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1108 BasicBlock *UnwindBB = nullptr) {
1109 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1110 }
1111
1112 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1113 unsigned NumHandlers,
1114 const Twine &Name = "") {
1115 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1116 Name);
1117 }
1118
1119 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1120 const Twine &Name = "") {
1121 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1122 }
1123
1124 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1125 ArrayRef<Value *> Args = None,
1126 const Twine &Name = "") {
1127 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1128 }
1129
1130 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1131 return Insert(CatchReturnInst::Create(CatchPad, BB));
1132 }
1133
1134 UnreachableInst *CreateUnreachable() {
1135 return Insert(new UnreachableInst(Context));
1136 }
1137
1138 //===--------------------------------------------------------------------===//
1139 // Instruction creation methods: Binary Operators
1140 //===--------------------------------------------------------------------===//
1141private:
1142 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1143 Value *LHS, Value *RHS,
1144 const Twine &Name,
1145 bool HasNUW, bool HasNSW) {
1146 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1147 if (HasNUW) BO->setHasNoUnsignedWrap();
1148 if (HasNSW) BO->setHasNoSignedWrap();
1149 return BO;
1150 }
1151
1152 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1153 FastMathFlags FMF) const {
1154 if (!FPMD)
1155 FPMD = DefaultFPMathTag;
1156 if (FPMD)
1157 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1158 I->setFastMathFlags(FMF);
1159 return I;
1160 }
1161
1162 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1163 Value *R, const Twine &Name) const {
1164 auto *LC = dyn_cast<Constant>(L);
1165 auto *RC = dyn_cast<Constant>(R);
1166 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1167 }
1168
1169 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1170 RoundingMode UseRounding = DefaultConstrainedRounding;
1171
1172 if (Rounding.hasValue())
1173 UseRounding = Rounding.getValue();
1174
1175 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1176 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1177 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1178
1179 return MetadataAsValue::get(Context, RoundingMDS);
1180 }
1181
1182 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1183 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1184
1185 if (Except.hasValue())
1186 UseExcept = Except.getValue();
1187
1188 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1189 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1190 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1191
1192 return MetadataAsValue::get(Context, ExceptMDS);
1193 }
1194
1195 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1196 assert(CmpInst::isFPPredicate(Predicate) &&
1197 Predicate != CmpInst::FCMP_FALSE &&
1198 Predicate != CmpInst::FCMP_TRUE &&
1199 "Invalid constrained FP comparison predicate!");
1200
1201 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1202 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1203
1204 return MetadataAsValue::get(Context, PredicateMDS);
1205 }
1206
1207public:
1208 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1209 bool HasNUW = false, bool HasNSW = false) {
1210 if (auto *LC = dyn_cast<Constant>(LHS))
1211 if (auto *RC = dyn_cast<Constant>(RHS))
1212 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1213 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1214 HasNUW, HasNSW);
1215 }
1216
1217 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1218 return CreateAdd(LHS, RHS, Name, false, true);
1219 }
1220
1221 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1222 return CreateAdd(LHS, RHS, Name, true, false);
1223 }
1224
1225 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1226 bool HasNUW = false, bool HasNSW = false) {
1227 if (auto *LC = dyn_cast<Constant>(LHS))
1228 if (auto *RC = dyn_cast<Constant>(RHS))
1229 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1230 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1231 HasNUW, HasNSW);
1232 }
1233
1234 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1235 return CreateSub(LHS, RHS, Name, false, true);
1236 }
1237
1238 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1239 return CreateSub(LHS, RHS, Name, true, false);
1240 }
1241
1242 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1243 bool HasNUW = false, bool HasNSW = false) {
1244 if (auto *LC = dyn_cast<Constant>(LHS))
1245 if (auto *RC = dyn_cast<Constant>(RHS))
1246 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1247 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1248 HasNUW, HasNSW);
1249 }
1250
1251 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1252 return CreateMul(LHS, RHS, Name, false, true);
1253 }
1254
1255 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1256 return CreateMul(LHS, RHS, Name, true, false);
1257 }
1258
1259 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1260 bool isExact = false) {
1261 if (auto *LC = dyn_cast<Constant>(LHS))
1262 if (auto *RC = dyn_cast<Constant>(RHS))
1263 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1264 if (!isExact)
1265 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1266 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1267 }
1268
1269 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1270 return CreateUDiv(LHS, RHS, Name, true);
1271 }
1272
1273 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1274 bool isExact = false) {
1275 if (auto *LC = dyn_cast<Constant>(LHS))
1276 if (auto *RC = dyn_cast<Constant>(RHS))
1277 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1278 if (!isExact)
1279 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1280 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1281 }
1282
1283 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1284 return CreateSDiv(LHS, RHS, Name, true);
1285 }
1286
1287 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1288 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1289 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1290 }
1291
1292 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1293 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1294 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1295 }
1296
1297 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1298 bool HasNUW = false, bool HasNSW = false) {
1299 if (auto *LC = dyn_cast<Constant>(LHS))
1300 if (auto *RC = dyn_cast<Constant>(RHS))
1301 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1302 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1303 HasNUW, HasNSW);
1304 }
1305
1306 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1307 bool HasNUW = false, bool HasNSW = false) {
1308 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1309 HasNUW, HasNSW);
1310 }
1311
1312 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1313 bool HasNUW = false, bool HasNSW = false) {
1314 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1315 HasNUW, HasNSW);
1316 }
1317
1318 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1319 bool isExact = false) {
1320 if (auto *LC = dyn_cast<Constant>(LHS))
1321 if (auto *RC = dyn_cast<Constant>(RHS))
1322 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1323 if (!isExact)
1324 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1325 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1326 }
1327
1328 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1329 bool isExact = false) {
1330 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1331 }
1332
1333 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1334 bool isExact = false) {
1335 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1336 }
1337
1338 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1339 bool isExact = false) {
1340 if (auto *LC = dyn_cast<Constant>(LHS))
1341 if (auto *RC = dyn_cast<Constant>(RHS))
1342 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1343 if (!isExact)
1344 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1345 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1346 }
1347
1348 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1349 bool isExact = false) {
1350 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1351 }
1352
1353 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1354 bool isExact = false) {
1355 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1356 }
1357
1358 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1359 if (auto *RC = dyn_cast<Constant>(RHS)) {
1360 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1361 return LHS; // LHS & -1 -> LHS
1362 if (auto *LC = dyn_cast<Constant>(LHS))
1363 return Insert(Folder.CreateAnd(LC, RC), Name);
1364 }
1365 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1366 }
1367
1368 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1369 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1370 }
1371
1372 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1373 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1374 }
1375
1376 Value *CreateAnd(ArrayRef<Value*> Ops) {
1377 assert(!Ops.empty());
1378 Value *Accum = Ops[0];
1379 for (unsigned i = 1; i < Ops.size(); i++)
1380 Accum = CreateAnd(Accum, Ops[i]);
1381 return Accum;
1382 }
1383
1384 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1385 if (auto *RC = dyn_cast<Constant>(RHS)) {
1386 if (RC->isNullValue())
1387 return LHS; // LHS | 0 -> LHS
1388 if (auto *LC = dyn_cast<Constant>(LHS))
1389 return Insert(Folder.CreateOr(LC, RC), Name);
1390 }
1391 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1392 }
1393
1394 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1395 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1396 }
1397
1398 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1399 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1400 }
1401
1402 Value *CreateOr(ArrayRef<Value*> Ops) {
1403 assert(!Ops.empty());
1404 Value *Accum = Ops[0];
1405 for (unsigned i = 1; i < Ops.size(); i++)
1406 Accum = CreateOr(Accum, Ops[i]);
1407 return Accum;
1408 }
1409
1410 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1411 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1412 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1413 }
1414
1415 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1416 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1417 }
1418
1419 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1420 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1421 }
1422
1423 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1424 MDNode *FPMD = nullptr) {
1425 if (IsFPConstrained)
1426 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1427 L, R, nullptr, Name, FPMD);
1428
1429 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1430 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1431 return Insert(I, Name);
1432 }
1433
1434 /// Copy fast-math-flags from an instruction rather than using the builder's
1435 /// default FMF.
1436 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1437 const Twine &Name = "") {
1438 if (IsFPConstrained)
1439 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1440 L, R, FMFSource, Name);
1441
1442 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1443 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1444 FMFSource->getFastMathFlags());
1445 return Insert(I, Name);
1446 }
1447
1448 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1449 MDNode *FPMD = nullptr) {
1450 if (IsFPConstrained)
1451 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1452 L, R, nullptr, Name, FPMD);
1453
1454 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1455 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1456 return Insert(I, Name);
1457 }
1458
1459 /// Copy fast-math-flags from an instruction rather than using the builder's
1460 /// default FMF.
1461 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1462 const Twine &Name = "") {
1463 if (IsFPConstrained)
1464 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1465 L, R, FMFSource, Name);
1466
1467 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1468 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1469 FMFSource->getFastMathFlags());
1470 return Insert(I, Name);
1471 }
1472
1473 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1474 MDNode *FPMD = nullptr) {
1475 if (IsFPConstrained)
1476 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1477 L, R, nullptr, Name, FPMD);
1478
1479 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1480 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1481 return Insert(I, Name);
1482 }
1483
1484 /// Copy fast-math-flags from an instruction rather than using the builder's
1485 /// default FMF.
1486 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1487 const Twine &Name = "") {
1488 if (IsFPConstrained)
1489 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1490 L, R, FMFSource, Name);
1491
1492 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1493 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1494 FMFSource->getFastMathFlags());
1495 return Insert(I, Name);
1496 }
1497
1498 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1499 MDNode *FPMD = nullptr) {
1500 if (IsFPConstrained)
1501 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1502 L, R, nullptr, Name, FPMD);
1503
1504 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1505 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1506 return Insert(I, Name);
1507 }
1508
1509 /// Copy fast-math-flags from an instruction rather than using the builder's
1510 /// default FMF.
1511 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1512 const Twine &Name = "") {
1513 if (IsFPConstrained)
1514 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1515 L, R, FMFSource, Name);
1516
1517 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1518 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1519 FMFSource->getFastMathFlags());
1520 return Insert(I, Name);
1521 }
1522
1523 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1524 MDNode *FPMD = nullptr) {
1525 if (IsFPConstrained)
1526 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1527 L, R, nullptr, Name, FPMD);
1528
1529 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1530 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1531 return Insert(I, Name);
1532 }
1533
1534 /// Copy fast-math-flags from an instruction rather than using the builder's
1535 /// default FMF.
1536 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1537 const Twine &Name = "") {
1538 if (IsFPConstrained)
1539 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1540 L, R, FMFSource, Name);
1541
1542 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1543 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1544 FMFSource->getFastMathFlags());
1545 return Insert(I, Name);
1546 }
1547
1548 Value *CreateBinOp(Instruction::BinaryOps Opc,
1549 Value *LHS, Value *RHS, const Twine &Name = "",
1550 MDNode *FPMathTag = nullptr) {
1551 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1552 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1553 if (isa<FPMathOperator>(BinOp))
1554 setFPAttrs(BinOp, FPMathTag, FMF);
1555 return Insert(BinOp, Name);
1556 }
1557
1558 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1559 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1560 return CreateSelect(Cond1, Cond2,
1561 ConstantInt::getNullValue(Cond2->getType()), Name);
1562 }
1563
1564 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1565 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1566 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1567 Cond2, Name);
1568 }
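A sketch (editorial addition) showing how these differ from CreateAnd/CreateOr: they lower to selects, so a poison value in the second operand is not propagated when the first operand already decides the result. Assuming an IRBuilder<> "B" and i1 values "C1" and "C2":

  // select C1, C2, false
  llvm::Value *Both = B.CreateLogicalAnd(C1, C2);
  // select C1, true, C2
  llvm::Value *Either = B.CreateLogicalOr(C1, C2);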
1569
1570 CallInst *CreateConstrainedFPBinOp(
1571 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1572 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1573 Optional<RoundingMode> Rounding = None,
1574 Optional<fp::ExceptionBehavior> Except = None);
1575
1576 Value *CreateNeg(Value *V, const Twine &Name = "",
1577 bool HasNUW = false, bool HasNSW = false) {
1578 if (auto *VC = dyn_cast<Constant>(V))
1579 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1580 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1581 if (HasNUW) BO->setHasNoUnsignedWrap();
1582 if (HasNSW) BO->setHasNoSignedWrap();
1583 return BO;
1584 }
1585
1586 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1587 return CreateNeg(V, Name, false, true);
1588 }
1589
1590 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1591 return CreateNeg(V, Name, true, false);
1592 }
1593
1594 Value *CreateFNeg(Value *V, const Twine &Name = "",
1595 MDNode *FPMathTag = nullptr) {
1596 if (auto *VC = dyn_cast<Constant>(V))
1597 return Insert(Folder.CreateFNeg(VC), Name);
1598 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1599 Name);
1600 }
1601
1602 /// Copy fast-math-flags from an instruction rather than using the builder's
1603 /// default FMF.
1604 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1605 const Twine &Name = "") {
1606 if (auto *VC = dyn_cast<Constant>(V))
1607 return Insert(Folder.CreateFNeg(VC), Name);
1608 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1609 FMFSource->getFastMathFlags()),
1610 Name);
1611 }
1612
1613 Value *CreateNot(Value *V, const Twine &Name = "") {
1614 if (auto *VC = dyn_cast<Constant>(V))
1615 return Insert(Folder.CreateNot(VC), Name);
1616 return Insert(BinaryOperator::CreateNot(V), Name);
1617 }
1618
1619 Value *CreateUnOp(Instruction::UnaryOps Opc,
1620 Value *V, const Twine &Name = "",
1621 MDNode *FPMathTag = nullptr) {
1622 if (auto *VC = dyn_cast<Constant>(V))
1623 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1624 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1625 if (isa<FPMathOperator>(UnOp))
1626 setFPAttrs(UnOp, FPMathTag, FMF);
1627 return Insert(UnOp, Name);
1628 }
1629
1630 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1631 /// Correct number of operands must be passed accordingly.
1632 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1633 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1634
1635 //===--------------------------------------------------------------------===//
1636 // Instruction creation methods: Memory Instructions
1637 //===--------------------------------------------------------------------===//
1638
1639 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1640 Value *ArraySize = nullptr, const Twine &Name = "") {
1641 const DataLayout &DL = BB->getModule()->getDataLayout();
1642 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1643 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1644 }
1645
1646 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1647 const Twine &Name = "") {
1648 const DataLayout &DL = BB->getModule()->getDataLayout();
1649 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1650 unsigned AddrSpace = DL.getAllocaAddrSpace();
1651 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1652 }
1653
1654 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1655 /// converting the string to 'bool' for the isVolatile parameter.
1656 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1657 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1658 }
1659
1660 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1661 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
37
Calling 'IRBuilderBase::CreateAlignedLoad'
1662 }
1663
1664 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1665 const Twine &Name = "") {
1666 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1667 }
1668
1669 // Deprecated [opaque pointer types]
1670 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1671 const char *Name),
1672 "Use the version that explicitly specifies the "
1673 "loaded type instead") {
1674 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1675 }
1676
1677 // Deprecated [opaque pointer types]
1678 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1679 const Twine &Name = ""),
1680 "Use the version that explicitly specifies the "
1681 "loaded type instead") {
1682 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1683 }
1684
1685 // Deprecated [opaque pointer types]
1686 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1687 bool isVolatile,
1688 const Twine &Name = ""),
1689 "Use the version that explicitly specifies the "
1690 "loaded type instead") {
1691 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1692 Name);
1693 }
1694
1695 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1696 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1697 }
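A short end-to-end sketch (editorial addition), assuming an IRBuilder<> "B" already positioned inside a function:

  // Stack slot, store, then a load with an explicit result type (the non-deprecated form).
  llvm::AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), nullptr, "slot");
  B.CreateStore(B.getInt32(42), Slot);
  llvm::Value *V = B.CreateLoad(B.getInt32Ty(), Slot, "v");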
1698
1699 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1700 const char *Name) {
1701 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1702 }
1703
1704 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1705 const Twine &Name = "") {
1706 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
38
Calling 'IRBuilderBase::CreateAlignedLoad'
1707 }
1708
1709 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1710 bool isVolatile, const Twine &Name = "") {
1711 if (!Align) {
39
Calling 'Optional::operator bool'
47
Returning from 'Optional::operator bool'
48
Taking true branch
1712 const DataLayout &DL = BB->getModule()->getDataLayout();
49
Called C++ object pointer is null
1713 Align = DL.getABITypeAlign(Ty);
1714 }
1715 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1716 }
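The line flagged above (step 49) dereferences BB, the builder's current insertion block, without a null check; the analyzer reports BB as null on this path. A hedged illustration of the precondition (editorial addition, not analyzer output), assuming an LLVMContext "Ctx", an i32* value "P" and a block "EntryBB":

  llvm::IRBuilder<> B(Ctx);         // GetInsertBlock() is null here
  B.SetInsertPoint(EntryBB);        // establish an insertion block first...
  B.CreateLoad(B.getInt32Ty(), P);  // ...so line 1712 can safely reach BB->getModule()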
1717
1718 // Deprecated [opaque pointer types]
1719 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1720 MaybeAlign Align,
1721 const char *Name),
1722 "Use the version that explicitly specifies the "
1723 "loaded type instead") {
1724 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1725 Align, Name);
1726 }
1727 // Deprecated [opaque pointer types]
1728 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1729 MaybeAlign Align,
1730 const Twine &Name = ""),
1731 "Use the version that explicitly specifies the "
1732 "loaded type instead") {
1733 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1734 Align, Name);
1735 }
1736 // Deprecated [opaque pointer types]
1737 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1738 MaybeAlign Align,
1739 bool isVolatile,
1740 const Twine &Name = ""),
1741 "Use the version that explicitly specifies the "
1742 "loaded type instead") {
1743 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1744 Align, isVolatile, Name);
1745 }
1746
1747 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1748 bool isVolatile = false) {
1749 if (!Align) {
1750 const DataLayout &DL = BB->getModule()->getDataLayout();
1751 Align = DL.getABITypeAlign(Val->getType());
1752 }
1753 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1754 }
1755 FenceInst *CreateFence(AtomicOrdering Ordering,
1756 SyncScope::ID SSID = SyncScope::System,
1757 const Twine &Name = "") {
1758 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1759 }
1760
1761 AtomicCmpXchgInst *
1762 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1763 AtomicOrdering SuccessOrdering,
1764 AtomicOrdering FailureOrdering,
1765 SyncScope::ID SSID = SyncScope::System) {
1766 if (!Align) {
1767 const DataLayout &DL = BB->getModule()->getDataLayout();
1768 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1769 }
1770
1771 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1772 FailureOrdering, SSID));
1773 }
1774
1775 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1776 Value *Val, MaybeAlign Align,
1777 AtomicOrdering Ordering,
1778 SyncScope::ID SSID = SyncScope::System) {
1779 if (!Align) {
1780 const DataLayout &DL = BB->getModule()->getDataLayout();
1781 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1782 }
1783
1784 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1785 }
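A usage sketch (editorial addition), assuming an IRBuilder<> "B" and an i32* value "Counter":

  // Atomic fetch-and-add of 1; the alignment is derived from the DataLayout
  // because an empty MaybeAlign() is passed (see the overload above).
  B.CreateAtomicRMW(llvm::AtomicRMWInst::Add, Counter, B.getInt32(1),
                    llvm::MaybeAlign(),
                    llvm::AtomicOrdering::SequentiallyConsistent);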
1786
1787 LLVM_ATTRIBUTE_DEPRECATED(
1788 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1789 const Twine &Name = ""),
1790 "Use the version with explicit element type instead") {
1791 return CreateGEP(Ptr->getType()->getScalarType()->getPointerElementType(),
1792 Ptr, IdxList, Name);
1793 }
1794
1795 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1796 const Twine &Name = "") {
1797 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1798 // Every index must be constant.
1799 size_t i, e;
1800 for (i = 0, e = IdxList.size(); i != e; ++i)
1801 if (!isa<Constant>(IdxList[i]))
1802 break;
1803 if (i == e)
1804 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1805 }
1806 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1807 }
1808
1809 LLVM_ATTRIBUTE_DEPRECATED(
1810 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1811 const Twine &Name = ""),
1812 "Use the version with explicit element type instead") {
1813 return CreateInBoundsGEP(
1814 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1815 Name);
1816 }
1817
1818 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1819 const Twine &Name = "") {
1820 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1821 // Every index must be constant.
1822 size_t i, e;
1823 for (i = 0, e = IdxList.size(); i != e; ++i)
1824 if (!isa<Constant>(IdxList[i]))
1825 break;
1826 if (i == e)
1827 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1828 Name);
1829 }
1830 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1831 }
1832
1833 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1834 if (auto *PC = dyn_cast<Constant>(Ptr))
1835 if (auto *IC = dyn_cast<Constant>(Idx))
1836 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1837 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1838 }
1839
1840 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1841 const Twine &Name = "") {
1842 if (auto *PC = dyn_cast<Constant>(Ptr))
1843 if (auto *IC = dyn_cast<Constant>(Idx))
1844 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1845 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1846 }
1847
1848 LLVM_ATTRIBUTE_DEPRECATED(
1849     Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0,
1850                               const Twine &Name = ""),
1851     "Use the version with explicit element type instead") {
1852 return CreateConstGEP1_32(
1853 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1854 Name);
1855 }
1856
1857 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1858 const Twine &Name = "") {
1859 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1860
1861 if (auto *PC = dyn_cast<Constant>(Ptr))
1862 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1863
1864 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1865 }
1866
1867 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1868 const Twine &Name = "") {
1869 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1870
1871 if (auto *PC = dyn_cast<Constant>(Ptr))
1872 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1873
1874 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1875 }
1876
1877 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1878 const Twine &Name = "") {
1879 Value *Idxs[] = {
1880 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1881 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1882 };
1883
1884 if (auto *PC = dyn_cast<Constant>(Ptr))
1885 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1886
1887 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1888 }
1889
1890 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1891 unsigned Idx1, const Twine &Name = "") {
1892 Value *Idxs[] = {
1893 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1894 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1895 };
1896
1897 if (auto *PC = dyn_cast<Constant>(Ptr))
1898 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1899
1900 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1901 }
1902
1903 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1904 const Twine &Name = "") {
1905 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1906
1907 if (auto *PC = dyn_cast<Constant>(Ptr))
1908 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1909
1910 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1911 }
1912
1913 LLVM_ATTRIBUTE_DEPRECATED(
1914     Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0,
1915                               const Twine &Name = ""),
1916     "Use the version with explicit element type instead") {
1917 return CreateConstGEP1_64(
1918 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1919 Name);
1920 }
1921
1922 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1923 const Twine &Name = "") {
1924 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1925
1926 if (auto *PC = dyn_cast<Constant>(Ptr))
1927 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1928
1929 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1930 }
1931
1932 LLVM_ATTRIBUTE_DEPRECATED(
1933     Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1934                                       const Twine &Name = ""),
1935     "Use the version with explicit element type instead") {
1936 return CreateConstInBoundsGEP1_64(
1937 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1938 Name);
1939 }
1940
1941 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1942 const Twine &Name = "") {
1943 Value *Idxs[] = {
1944 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1945 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1946 };
1947
1948 if (auto *PC = dyn_cast<Constant>(Ptr))
1949 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1950
1951 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1952 }
1953
1954 LLVM_ATTRIBUTE_DEPRECATED(
1955     Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1956                               const Twine &Name = ""),
1957     "Use the version with explicit element type instead") {
1958 return CreateConstGEP2_64(
1959 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1960 Idx1, Name);
1961 }
1962
1963 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1964 uint64_t Idx1, const Twine &Name = "") {
1965 Value *Idxs[] = {
1966 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1967 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1968 };
1969
1970 if (auto *PC = dyn_cast<Constant>(Ptr))
1971 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1972
1973 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1974 }
1975
1976 LLVM_ATTRIBUTE_DEPRECATED(
1977     Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0,
1978                                       uint64_t Idx1, const Twine &Name = ""),
1979     "Use the version with explicit element type instead") {
1980 return CreateConstInBoundsGEP2_64(
1981 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1982 Idx1, Name);
1983 }
1984
1985 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1986 const Twine &Name = "") {
1987 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1988 }
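 // Illustrative usage sketch (editorial, not in the original header), assuming
 // an IRBuilder `B`, a struct type `STy`, and a pointer `P` to such a struct:
 //   Value *FieldPtr = B.CreateStructGEP(STy, P, /*Idx=*/1);
 // This is shorthand for an inbounds GEP with the index list {i32 0, i32 1}.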
1989
1990 LLVM_ATTRIBUTE_DEPRECATED(
1991     Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = ""),
1992     "Use the version with explicit element type instead") {
1993 return CreateConstInBoundsGEP2_32(
1994 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, 0, Idx,
1995 Name);
1996 }
1997
1998 /// Same as CreateGlobalString, but return a pointer with "i8*" type
1999 /// instead of a pointer to array of i8.
2000 ///
2001 /// If no module is given via \p M, it is taken from the insertion point basic
2002 /// block.
2003 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
2004 unsigned AddressSpace = 0,
2005 Module *M = nullptr) {
2006 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
2007 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2008 Constant *Indices[] = {Zero, Zero};
2009 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
2010 Indices);
2011 }
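 // Illustrative usage sketch (editorial, not in the original header), assuming
 // an IRBuilder `B` with a valid insertion point:
 //   Constant *Msg = B.CreateGlobalStringPtr("hello");
 // The result is an i8* constant formed by indexing the backing [N x i8]
 // global with {0, 0}, e.g. for passing directly to a printf-style callee.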
2012
2013 //===--------------------------------------------------------------------===//
2014 // Instruction creation methods: Cast/Conversion Operators
2015 //===--------------------------------------------------------------------===//
2016
2017 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2018 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2019 }
2020
2021 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2022 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2023 }
2024
2025 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2026 return CreateCast(Instruction::SExt, V, DestTy, Name);
2027 }
2028
2029 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2030 /// the value untouched if the type of V is already DestTy.
2031 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2032 const Twine &Name = "") {
2033     assert(V->getType()->isIntOrIntVectorTy() &&
2034            DestTy->isIntOrIntVectorTy() &&
2035            "Can only zero extend/truncate integers!");
2036 Type *VTy = V->getType();
2037 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2038 return CreateZExt(V, DestTy, Name);
2039 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2040 return CreateTrunc(V, DestTy, Name);
2041 return V;
2042 }
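 // Illustrative behaviour sketch (editorial, not in the original header), for
 // an IRBuilder `B` and an i8 value `V8`:
 //   B.CreateZExtOrTrunc(V8, B.getInt32Ty());  // emits zext i8 -> i32
 //   B.CreateZExtOrTrunc(V8, B.getInt8Ty());   // returns V8 unchanged
 // A wider-to-narrower request emits a trunc instead.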
2043
2044 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2045 /// the value untouched if the type of V is already DestTy.
2046 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2047 const Twine &Name = "") {
2048     assert(V->getType()->isIntOrIntVectorTy() &&
2049            DestTy->isIntOrIntVectorTy() &&
2050            "Can only sign extend/truncate integers!");
2051 Type *VTy = V->getType();
2052 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2053 return CreateSExt(V, DestTy, Name);
2054 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2055 return CreateTrunc(V, DestTy, Name);
2056 return V;
2057 }
2058
2059 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2060 if (IsFPConstrained)
2061 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2062 V, DestTy, nullptr, Name);
2063 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2064 }
2065
2066 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2067 if (IsFPConstrained)
2068 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2069 V, DestTy, nullptr, Name);
2070 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2071 }
2072
2073 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2074 if (IsFPConstrained)
2075 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2076 V, DestTy, nullptr, Name);
2077 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2078 }
2079
2080 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2081 if (IsFPConstrained)
2082 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2083 V, DestTy, nullptr, Name);
2084 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2085 }
2086
2087 Value *CreateFPTrunc(Value *V, Type *DestTy,
2088 const Twine &Name = "") {
2089 if (IsFPConstrained)
2090 return CreateConstrainedFPCast(
2091 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2092 Name);
2093 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2094 }
2095
2096 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2097 if (IsFPConstrained)
2098 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2099 V, DestTy, nullptr, Name);
2100 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2101 }
2102
2103 Value *CreatePtrToInt(Value *V, Type *DestTy,
2104 const Twine &Name = "") {
2105 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2106 }
2107
2108 Value *CreateIntToPtr(Value *V, Type *DestTy,
2109 const Twine &Name = "") {
2110 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2111 }
2112
2113 Value *CreateBitCast(Value *V, Type *DestTy,
2114 const Twine &Name = "") {
2115 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2116 }
2117
2118 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2119 const Twine &Name = "") {
2120 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2121 }
2122
2123 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2124 const Twine &Name = "") {
2125 if (V->getType() == DestTy)
2126 return V;
2127 if (auto *VC = dyn_cast<Constant>(V))
2128 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2129 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2130 }
2131
2132 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2133 const Twine &Name = "") {
2134 if (V->getType() == DestTy)
2135 return V;
2136 if (auto *VC = dyn_cast<Constant>(V))
2137 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2138 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2139 }
2140
2141 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2142 const Twine &Name = "") {
2143 if (V->getType() == DestTy)
2144 return V;
2145 if (auto *VC = dyn_cast<Constant>(V))
2146 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2147 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2148 }
2149
2150 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2151 const Twine &Name = "") {
2152 if (V->getType() == DestTy)
2153 return V;
2154 if (auto *VC = dyn_cast<Constant>(V))
2155 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2156 return Insert(CastInst::Create(Op, V, DestTy), Name);
2157 }
2158
2159 Value *CreatePointerCast(Value *V, Type *DestTy,
2160 const Twine &Name = "") {
2161 if (V->getType() == DestTy)
2162 return V;
2163 if (auto *VC = dyn_cast<Constant>(V))
2164 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2165 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2166 }
2167
2168 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2169 const Twine &Name = "") {
2170 if (V->getType() == DestTy)
2171 return V;
2172
2173 if (auto *VC = dyn_cast<Constant>(V)) {
2174 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2175 Name);
2176 }
2177
2178 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2179 Name);
2180 }
2181
2182 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2183 const Twine &Name = "") {
2184 if (V->getType() == DestTy)
2185 return V;
2186 if (auto *VC = dyn_cast<Constant>(V))
2187 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2188 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2189 }
2190
2191 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2192 const Twine &Name = "") {
2193 if (V->getType() == DestTy)
2194 return V;
2195 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2196 return CreatePtrToInt(V, DestTy, Name);
2197 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2198 return CreateIntToPtr(V, DestTy, Name);
2199
2200 return CreateBitCast(V, DestTy, Name);
2201 }
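 // Editorial note: this helper picks the cast kind for the caller: ptrtoint
 // for pointer to integer, inttoptr for integer to pointer, and a plain
 // bitcast otherwise. Sketch, assuming an IRBuilder `B` and a pointer `P`:
 //   Value *Addr = B.CreateBitOrPointerCast(P, B.getInt64Ty());  // ptrtoint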
2202
2203 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2204 if (V->getType() == DestTy)
2205 return V;
2206 if (auto *VC = dyn_cast<Constant>(V))
2207 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2208 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2209 }
2210
2211 CallInst *CreateConstrainedFPCast(
2212 Intrinsic::ID ID, Value *V, Type *DestTy,
2213 Instruction *FMFSource = nullptr, const Twine &Name = "",
2214 MDNode *FPMathTag = nullptr,
2215 Optional<RoundingMode> Rounding = None,
2216 Optional<fp::ExceptionBehavior> Except = None);
2217
2218 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2219 // compile time error, instead of converting the string to bool for the
2220 // isSigned parameter.
2221 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2222
2223 //===--------------------------------------------------------------------===//
2224 // Instruction creation methods: Compare Instructions
2225 //===--------------------------------------------------------------------===//
2226
2227 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2228 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2229 }
2230
2231 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2232 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2233 }
2234
2235 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2236 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2237 }
2238
2239 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2240 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2241 }
2242
2243 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2244 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2245 }
2246
2247 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2248 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2249 }
2250
2251 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2252 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2253 }
2254
2255 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2256 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2257 }
2258
2259 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2260 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2261 }
2262
2263 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2264 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2265 }
2266
2267 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2268 MDNode *FPMathTag = nullptr) {
2269 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2270 }
2271
2272 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2273 MDNode *FPMathTag = nullptr) {
2274 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2275 }
2276
2277 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2278 MDNode *FPMathTag = nullptr) {
2279 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2280 }
2281
2282 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2283 MDNode *FPMathTag = nullptr) {
2284 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2285 }
2286
2287 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2288 MDNode *FPMathTag = nullptr) {
2289 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2290 }
2291
2292 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2293 MDNode *FPMathTag = nullptr) {
2294 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2295 }
2296
2297 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2298 MDNode *FPMathTag = nullptr) {
2299 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2300 }
2301
2302 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2303 MDNode *FPMathTag = nullptr) {
2304 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2305 }
2306
2307 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2308 MDNode *FPMathTag = nullptr) {
2309 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2310 }
2311
2312 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2313 MDNode *FPMathTag = nullptr) {
2314 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2315 }
2316
2317 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2318 MDNode *FPMathTag = nullptr) {
2319 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2320 }
2321
2322 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2323 MDNode *FPMathTag = nullptr) {
2324 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2325 }
2326
2327 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2328 MDNode *FPMathTag = nullptr) {
2329 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2330 }
2331
2332 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2333 MDNode *FPMathTag = nullptr) {
2334 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2335 }
2336
2337 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2338 const Twine &Name = "") {
2339 if (auto *LC = dyn_cast<Constant>(LHS))
2340 if (auto *RC = dyn_cast<Constant>(RHS))
2341 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2342 return Insert(new ICmpInst(P, LHS, RHS), Name);
2343 }
2344
2345 // Create a quiet floating-point comparison (i.e. one that raises an FP
2346 // exception only in the case where an input is a signaling NaN).
2347 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2348 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2349 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2350 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2351 }
2352
2353 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2354 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2355 return CmpInst::isFPPredicate(Pred)
2356 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2357 : CreateICmp(Pred, LHS, RHS, Name);
2358 }
2359
2360 // Create a signaling floating-point comparison (i.e. one that raises an FP
2361 // exception whenever an input is any NaN, signaling or quiet).
2362 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2363 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2364 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2365 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2366 }
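 // Editorial sketch of the quiet/signaling distinction, assuming an IRBuilder
 // `B` with IsFPConstrained set and two double values X and Y:
 //   Value *Quiet     = B.CreateFCmp(FCmpInst::FCMP_OLT, X, Y);   // constrained fcmp
 //   Value *Signaling = B.CreateFCmpS(FCmpInst::FCMP_OLT, X, Y);  // constrained fcmps
 // Without constrained FP, both calls emit an ordinary fcmp instruction.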
2367
2368private:
2369 // Helper routine to create either a signaling or a quiet FP comparison.
2370 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2371 const Twine &Name, MDNode *FPMathTag,
2372 bool IsSignaling);
2373
2374public:
2375 CallInst *CreateConstrainedFPCmp(
2376 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2377 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2378
2379 //===--------------------------------------------------------------------===//
2380 // Instruction creation methods: Other Instructions
2381 //===--------------------------------------------------------------------===//
2382
2383 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2384 const Twine &Name = "") {
2385 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2386 if (isa<FPMathOperator>(Phi))
2387 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2388 return Insert(Phi, Name);
2389 }
2390
2391 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2392 ArrayRef<Value *> Args = None, const Twine &Name = "",
2393 MDNode *FPMathTag = nullptr) {
2394 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2395 if (IsFPConstrained)
2396 setConstrainedFPCallAttr(CI);
2397 if (isa<FPMathOperator>(CI))
2398 setFPAttrs(CI, FPMathTag, FMF);
2399 return Insert(CI, Name);
2400 }
2401
2402 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2403 ArrayRef<OperandBundleDef> OpBundles,
2404 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2405 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2406 if (IsFPConstrained)
2407 setConstrainedFPCallAttr(CI);
2408 if (isa<FPMathOperator>(CI))
2409 setFPAttrs(CI, FPMathTag, FMF);
2410 return Insert(CI, Name);
2411 }
2412
2413 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2414 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2415 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2416 FPMathTag);
2417 }
2418
2419 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2420 ArrayRef<OperandBundleDef> OpBundles,
2421 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2422 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2423 OpBundles, Name, FPMathTag);
2424 }
2425
2426 CallInst *CreateConstrainedFPCall(
2427 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2428 Optional<RoundingMode> Rounding = None,
2429 Optional<fp::ExceptionBehavior> Except = None);
2430
2431 Value *CreateSelect(Value *C, Value *True, Value *False,
2432 const Twine &Name = "", Instruction *MDFrom = nullptr);
2433
2434 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2435 return Insert(new VAArgInst(List, Ty), Name);
2436 }
2437
2438 Value *CreateExtractElement(Value *Vec, Value *Idx,
2439 const Twine &Name = "") {
2440 if (auto *VC = dyn_cast<Constant>(Vec))
2441 if (auto *IC = dyn_cast<Constant>(Idx))
2442 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2443 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2444 }
2445
2446 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2447 const Twine &Name = "") {
2448 return CreateExtractElement(Vec, getInt64(Idx), Name);
2449 }
2450
2451 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2452 const Twine &Name = "") {
2453 if (auto *VC = dyn_cast<Constant>(Vec))
2454 if (auto *NC = dyn_cast<Constant>(NewElt))
2455 if (auto *IC = dyn_cast<Constant>(Idx))
2456 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2457 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2458 }
2459
2460 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2461 const Twine &Name = "") {
2462 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2463 }
2464
2465 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2466 const Twine &Name = "") {
2467 SmallVector<int, 16> IntMask;
2468 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2469 return CreateShuffleVector(V1, V2, IntMask, Name);
2470 }
2471
2472 LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2473                                                       ArrayRef<uint32_t> Mask,
2474                                                       const Twine &Name = ""),
2475                           "Pass indices as 'int' instead") {
2476 SmallVector<int, 16> IntMask;
2477 IntMask.assign(Mask.begin(), Mask.end());
2478 return CreateShuffleVector(V1, V2, IntMask, Name);
2479 }
2480
2481 /// See class ShuffleVectorInst for a description of the mask representation.
2482 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2483 const Twine &Name = "") {
2484 if (auto *V1C = dyn_cast<Constant>(V1))
2485 if (auto *V2C = dyn_cast<Constant>(V2))
2486 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2487 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2488 }
2489
2490 /// Create a unary shuffle. The second vector operand of the IR instruction
2491 /// is poison.
2492 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2493 const Twine &Name = "") {
2494 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2495 }
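 // Editorial sketch of the mask form, assuming an IRBuilder `B` and a
 // <4 x i32> value `V`:
 //   Value *Rev = B.CreateShuffleVector(V, {3, 2, 1, 0});  // reverse the lanes
 // Mask elements index into concat(V1, V2) for the two-operand overloads.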
2496
2497 Value *CreateExtractValue(Value *Agg,
2498 ArrayRef<unsigned> Idxs,
2499 const Twine &Name = "") {
2500 if (auto *AggC = dyn_cast<Constant>(Agg))
2501 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2502 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2503 }
2504
2505 Value *CreateInsertValue(Value *Agg, Value *Val,
2506 ArrayRef<unsigned> Idxs,
2507 const Twine &Name = "") {
2508 if (auto *AggC = dyn_cast<Constant>(Agg))
2509 if (auto *ValC = dyn_cast<Constant>(Val))
2510 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2511 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2512 }
2513
2514 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2515 const Twine &Name = "") {
2516 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2517 }
2518
2519 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2520 return Insert(new FreezeInst(V), Name);
2521 }
2522
2523 //===--------------------------------------------------------------------===//
2524 // Utility creation methods
2525 //===--------------------------------------------------------------------===//
2526
2527 /// Return an i1 value testing if \p Arg is null.
2528 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2529 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2530 Name);
2531 }
2532
2533 /// Return an i1 value testing if \p Arg is not null.
2534 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2535 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2536 Name);
2537 }
2538
2539 /// Return the i64 difference between two pointer values, dividing out
2540 /// the size of the pointed-to objects.
2541 ///
2542 /// This is intended to implement C-style pointer subtraction. As such, the
2543 /// pointers must be appropriately aligned for their element types and
2544 /// pointing into the same object.
2545 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
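 // Editorial sketch: for two i32* values A and B, CreatePtrDiff(A, B) emits
 // ptrtoint on both operands, a sub, and an exact sdiv by the element store
 // size (4 here), mirroring C's pointer subtraction on int*.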
2546
2547 /// Create a launder.invariant.group intrinsic call. If Ptr's type is
2548 /// different from a pointer to i8, it is cast to a pointer to i8 in the same
2549 /// address space before the call and cast back to Ptr's type after the call.
2550 Value *CreateLaunderInvariantGroup(Value *Ptr);
2551
2552 /// \brief Create a strip.invariant.group intrinsic call. If Ptr's type is
2553 /// different from a pointer to i8, it is cast to a pointer to i8 in the same
2554 /// address space before the call and cast back to Ptr's type after the call.
2555 Value *CreateStripInvariantGroup(Value *Ptr);
2556
2557 /// Return a vector value that contains the vector V reversed
2558 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2559
2560 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2561 /// return a shufflevector. If the immediate is positive, a vector is
2562 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2563 /// is negative, we extract -Imm elements from V1 and the remaining
2564 /// elements from V2. Imm is a signed integer in the range
2565 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2566 /// source/result vector)
2567 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2568 const Twine &Name = "");
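 // Editorial example of the Imm semantics for two <4 x i32> operands
 // A = <a0,a1,a2,a3> and B = <b0,b1,b2,b3>:
 //   Imm =  1  ->  <a1, a2, a3, b0>   (start at element 1 of concat(A, B))
 //   Imm = -1  ->  <a3, b0, b1, b2>   (last element of A, remainder from B)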
2569
2570 /// Return a vector value that contains \arg V broadcasted to \p
2571 /// NumElts elements.
2572 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2573
2574 /// Return a vector value that contains \arg V broadcasted to \p
2575 /// EC elements.
2576 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2577
2578 /// Return a value that has been extracted from a larger integer type.
2579 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2580 IntegerType *ExtractedTy, uint64_t Offset,
2581 const Twine &Name);
2582
2583 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2584 unsigned Dimension, unsigned LastIndex,
2585 MDNode *DbgInfo);
2586
2587 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2588 MDNode *DbgInfo);
2589
2590 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2591 unsigned Index, unsigned FieldIndex,
2592 MDNode *DbgInfo);
2593
2594private:
2595 /// Helper function that creates an assume intrinsic call that
2596 /// represents an alignment assumption on the provided pointer \p PtrValue
2597 /// with offset \p OffsetValue and alignment value \p AlignValue.
2598 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2599 Value *PtrValue, Value *AlignValue,
2600 Value *OffsetValue);
2601
2602public:
2603 /// Create an assume intrinsic call that represents an alignment
2604 /// assumption on the provided pointer.
2605 ///
2606 /// An optional offset can be provided, and if it is provided, the offset
2607 /// must be subtracted from the provided pointer to get the pointer with the
2608 /// specified alignment.
2609 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2610 unsigned Alignment,
2611 Value *OffsetValue = nullptr);
2612
2613 /// Create an assume intrinsic call that represents an alignment
2614 /// assumption on the provided pointer.
2615 ///
2616 /// An optional offset can be provided, and if it is provided, the offset
2617 /// must be subtracted from the provided pointer to get the pointer with the
2618 /// specified alignment.
2619 ///
2620 /// This overload handles the condition where the Alignment is dependent
2621 /// on an existing value rather than a static value.
2622 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2623 Value *Alignment,
2624 Value *OffsetValue = nullptr);
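 // Editorial sketch, assuming an IRBuilder `B`, the module's DataLayout `DL`,
 // and an i8* value `P` known to be 16-byte aligned:
 //   B.CreateAlignmentAssumption(DL, P, 16);
 // This emits an llvm.assume call recording the alignment of P.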
2625};
2626
2627/// This provides a uniform API for creating instructions and inserting
2628/// them into a basic block: either at the end of a BasicBlock, or at a specific
2629/// iterator location in a block.
2630///
2631/// Note that the builder does not expose the full generality of LLVM
2632/// instructions. For access to extra instruction properties, use the mutators
2633/// (e.g. setVolatile) on the instructions after they have been
2634/// created. Convenience state exists to specify fast-math flags and fp-math
2635/// tags.
2636///
2637/// The first template argument specifies a class to use for creating constants.
2638/// This defaults to creating minimally folded constants. The second template
2639/// argument allows clients to specify custom insertion hooks that are called on
2640/// every newly created instruction.
2641template <typename FolderTy = ConstantFolder,
2642 typename InserterTy = IRBuilderDefaultInserter>
2643class IRBuilder : public IRBuilderBase {
2644private:
2645 FolderTy Folder;
2646 InserterTy Inserter;
2647
2648public:
2649 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2650 MDNode *FPMathTag = nullptr,
2651 ArrayRef<OperandBundleDef> OpBundles = None)
2652 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2653 Folder(Folder), Inserter(Inserter) {}
2654
2655 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2656 ArrayRef<OperandBundleDef> OpBundles = None)
2657 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2658
2659 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2660 MDNode *FPMathTag = nullptr,
2661 ArrayRef<OperandBundleDef> OpBundles = None)
2662 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2663 FPMathTag, OpBundles), Folder(Folder) {
2664 SetInsertPoint(TheBB);
2665 }
2666
2667 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2668 ArrayRef<OperandBundleDef> OpBundles = None)
2669 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2670 FPMathTag, OpBundles) {
2671 SetInsertPoint(TheBB);
2672 }
2673
2674 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2675 ArrayRef<OperandBundleDef> OpBundles = None)
2676 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2677 FPMathTag, OpBundles) {
2678 SetInsertPoint(IP);
2679 }
2680
2681 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2682 MDNode *FPMathTag = nullptr,
2683 ArrayRef<OperandBundleDef> OpBundles = None)
2684 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2685 FPMathTag, OpBundles), Folder(Folder) {
2686 SetInsertPoint(TheBB, IP);
2687 }
2688
2689 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2690 MDNode *FPMathTag = nullptr,
2691 ArrayRef<OperandBundleDef> OpBundles = None)
2692 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2693 FPMathTag, OpBundles) {
2694 SetInsertPoint(TheBB, IP);
2695 }
2696
2697 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2698 /// or FastMathFlagGuard instead.
2699 IRBuilder(const IRBuilder &) = delete;
2700
2701 InserterTy &getInserter() { return Inserter; }
2702};
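// Editorial usage sketch of the default builder, assuming an existing
// BasicBlock *BB and two i32 values L and R:
//   IRBuilder<> B(BB);                      // insert at the end of BB
//   Value *Sum = B.CreateAdd(L, R, "sum");  // folded if L and R are constants
// Custom folding or insertion behaviour is supplied via the template arguments.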
2703
2704// Create wrappers for C Binding types (see CBindingWrapping.h).
2705DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2706
2707} // end namespace llvm
2708
2709#endif // LLVM_IR_IRBUILDER_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/Optional.h

1//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides Optional, a template class modeled in the spirit of
10// OCaml's 'opt' variant. The idea is to strongly type whether or not
11// a value can be optional.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ADT_OPTIONAL_H
16#define LLVM_ADT_OPTIONAL_H
17
18#include "llvm/ADT/Hashing.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/Support/Compiler.h"
22#include "llvm/Support/type_traits.h"
23#include <cassert>
24#include <memory>
25#include <new>
26#include <utility>
27
28namespace llvm {
29
30class raw_ostream;
31
32namespace optional_detail {
33
34/// Storage for any type.
35//
36// The specialization condition intentionally uses
37// llvm::is_trivially_copy_constructible instead of
38// std::is_trivially_copy_constructible. GCC versions prior to 7.4 may
39// instantiate the copy constructor of `T` when
40// std::is_trivially_copy_constructible is instantiated. This causes
41// compilation to fail if we query the trivially copy constructible property of
42// a class which is not copy constructible.
43//
44// The current implementation of OptionalStorage insists that in order to use
45// the trivial specialization, the value_type must be trivially copy
46// constructible and trivially copy assignable due to =default implementations
47// of the copy/move constructor/assignment. It does not follow that this is
48// necessarily the case when std::is_trivially_copyable is true (hence the expanded
49// specialization condition).
50//
51// The move constructible / assignable conditions emulate the remaining behavior
52// of std::is_trivially_copyable.
53template <typename T, bool = (llvm::is_trivially_copy_constructible<T>::value &&
54 std::is_trivially_copy_assignable<T>::value &&
55 (std::is_trivially_move_constructible<T>::value ||
56 !std::is_move_constructible<T>::value) &&
57 (std::is_trivially_move_assignable<T>::value ||
58 !std::is_move_assignable<T>::value))>
59class OptionalStorage {
60 union {
61 char empty;
62 T value;
63 };
64 bool hasVal;
65
66public:
67 ~OptionalStorage() { reset(); }
68
69 constexpr OptionalStorage() noexcept : empty(), hasVal(false) {}
70
71 constexpr OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
72 if (other.hasValue()) {
73 emplace(other.value);
74 }
75 }
76 constexpr OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
77 if (other.hasValue()) {
78 emplace(std::move(other.value));
79 }
80 }
81
82 template <class... Args>
83 constexpr explicit OptionalStorage(in_place_t, Args &&... args)
84 : value(std::forward<Args>(args)...), hasVal(true) {}
85
86 void reset() noexcept {
87 if (hasVal) {
88 value.~T();
89 hasVal = false;
90 }
91 }
92
93 constexpr bool hasValue() const noexcept { return hasVal; }
94
95 T &getValue() LLVM_LVALUE_FUNCTION noexcept {
96 assert(hasVal);
97 return value;
98 }
99 constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
100 assert(hasVal);
101 return value;
102 }
103#if LLVM_HAS_RVALUE_REFERENCE_THIS
104 T &&getValue() && noexcept {
105 assert(hasVal);
106 return std::move(value);
107 }
108#endif
109
110 template <class... Args> void emplace(Args &&... args) {
111 reset();
112 ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
113 hasVal = true;
114 }
115
116 OptionalStorage &operator=(T const &y) {
117 if (hasValue()) {
118 value = y;
119 } else {
120 ::new ((void *)std::addressof(value)) T(y);
121 hasVal = true;
122 }
123 return *this;
124 }
125 OptionalStorage &operator=(T &&y) {
126 if (hasValue()) {
127 value = std::move(y);
128 } else {
129 ::new ((void *)std::addressof(value)) T(std::move(y));
130 hasVal = true;
131 }
132 return *this;
133 }
134
135 OptionalStorage &operator=(OptionalStorage const &other) {
136 if (other.hasValue()) {
137 if (hasValue()) {
138 value = other.value;
139 } else {
140 ::new ((void *)std::addressof(value)) T(other.value);
141 hasVal = true;
142 }
143 } else {
144 reset();
145 }
146 return *this;
147 }
148
149 OptionalStorage &operator=(OptionalStorage &&other) {
150 if (other.hasValue()) {
151 if (hasValue()) {
152 value = std::move(other.value);
153 } else {
154 ::new ((void *)std::addressof(value)) T(std::move(other.value));
155 hasVal = true;
156 }
157 } else {
158 reset();
159 }
160 return *this;
161 }
162};
163
164template <typename T> class OptionalStorage<T, true> {
165 union {
166 char empty;
167 T value;
168 };
169 bool hasVal = false;
170
171public:
172 ~OptionalStorage() = default;
173
174 constexpr OptionalStorage() noexcept : empty{} {}
175
176 constexpr OptionalStorage(OptionalStorage const &other) = default;
177 constexpr OptionalStorage(OptionalStorage &&other) = default;
178
179 OptionalStorage &operator=(OptionalStorage const &other) = default;
180 OptionalStorage &operator=(OptionalStorage &&other) = default;
181
182 template <class... Args>
183 constexpr explicit OptionalStorage(in_place_t, Args &&... args)
184 : value(std::forward<Args>(args)...), hasVal(true) {}
185
186 void reset() noexcept {
187 if (hasVal) {
188 value.~T();
189 hasVal = false;
190 }
191 }
192
193 constexpr bool hasValue() const noexcept { return hasVal; }
      42. Returning zero, which participates in a condition later
194
195 T &getValue() LLVM_LVALUE_FUNCTION noexcept {
196 assert(hasVal);
197 return value;
198 }
199 constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
200 assert(hasVal);
201 return value;
202 }
203#if LLVM_HAS_RVALUE_REFERENCE_THIS
204 T &&getValue() && noexcept {
205 assert(hasVal);
206 return std::move(value);
207 }
208#endif
209
210 template <class... Args> void emplace(Args &&... args) {
211 reset();
212 ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
213 hasVal = true;
214 }
215
216 OptionalStorage &operator=(T const &y) {
217 if (hasValue()) {
218 value = y;
219 } else {
220 ::new ((void *)std::addressof(value)) T(y);
221 hasVal = true;
222 }
223 return *this;
224 }
225 OptionalStorage &operator=(T &&y) {
226 if (hasValue()) {
227 value = std::move(y);
228 } else {
229 ::new ((void *)std::addressof(value)) T(std::move(y));
230 hasVal = true;
231 }
232 return *this;
233 }
234};
235
236} // namespace optional_detail
237
238template <typename T> class Optional {
239 optional_detail::OptionalStorage<T> Storage;
240
241public:
242 using value_type = T;
243
244 constexpr Optional() {}
245 constexpr Optional(NoneType) {}
246
247 constexpr Optional(const T &y) : Storage(in_place, y) {}
248 constexpr Optional(const Optional &O) = default;
249
250 constexpr Optional(T &&y) : Storage(in_place, std::move(y)) {}
251 constexpr Optional(Optional &&O) = default;
252
253 template <typename... ArgTypes>
254 constexpr Optional(in_place_t, ArgTypes &&...Args)
255 : Storage(in_place, std::forward<ArgTypes>(Args)...) {}
256
257 Optional &operator=(T &&y) {
258 Storage = std::move(y);
259 return *this;
260 }
261 Optional &operator=(Optional &&O) = default;
262
263 /// Create a new object by constructing it in place with the given arguments.
264 template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
265 Storage.emplace(std::forward<ArgTypes>(Args)...);
266 }
267
268 static constexpr Optional create(const T *y) {
269 return y ? Optional(*y) : Optional();
270 }
271
272 Optional &operator=(const T &y) {
273 Storage = y;
274 return *this;
275 }
276 Optional &operator=(const Optional &O) = default;
277
278 void reset() { Storage.reset(); }
279
280 constexpr const T *getPointer() const { return &Storage.getValue(); }
281 T *getPointer() { return &Storage.getValue(); }
282 constexpr const T &getValue() const LLVM_LVALUE_FUNCTION {
283 return Storage.getValue();
284 }
285 T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
286
287 constexpr explicit operator bool() const { return hasValue(); }
      40. Calling 'Optional::hasValue'
      45. Returning from 'Optional::hasValue'
      46. Returning zero, which participates in a condition later
288 constexpr bool hasValue() const { return Storage.hasValue(); }
      41. Calling 'OptionalStorage::hasValue'
      43. Returning from 'OptionalStorage::hasValue'
      44. Returning zero, which participates in a condition later
289 constexpr const T *operator->() const { return getPointer(); }
290 T *operator->() { return getPointer(); }
291 constexpr const T &operator*() const LLVM_LVALUE_FUNCTION {
292 return getValue();
293 }
294 T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }
295
296 template <typename U>
297 constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
298 return hasValue() ? getValue() : std::forward<U>(value);
299 }
300
301 /// Apply a function to the value if present; otherwise return None.
302 template <class Function>
303 auto map(const Function &F) const LLVM_LVALUE_FUNCTION
304 -> Optional<decltype(F(getValue()))> {
305 if (*this) return F(getValue());
306 return None;
307 }
308
309#if LLVM_HAS_RVALUE_REFERENCE_THIS
310 T &&getValue() && { return std::move(Storage.getValue()); }
311 T &&operator*() && { return std::move(Storage.getValue()); }
312
313 template <typename U>
314 T getValueOr(U &&value) && {
315 return hasValue() ? std::move(getValue()) : std::forward<U>(value);
316 }
317
318 /// Apply a function to the value if present; otherwise return None.
319 template <class Function>
320 auto map(const Function &F) &&
321 -> Optional<decltype(F(std::move(*this).getValue()))> {
322 if (*this) return F(std::move(*this).getValue());
323 return None;
324 }
325#endif
326};
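// Editorial usage sketch:
//   Optional<int> X;             // empty; operator bool() yields false
//   X = 42;                      // engaged via OptionalStorage::operator=
//   if (X) consume(*X);          // consume is a placeholder; *X asserts hasVal
//   int Y = X.getValueOr(0);     // falls back to 0 when empty
// The analyzer path above tracks hasValue() returning false and feeding a
// later condition.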
327
328template <class T> llvm::hash_code hash_value(const Optional<T> &O) {
329 return O ? hash_combine(true, *O) : hash_value(false);
330}
331
332template <typename T, typename U>
333constexpr bool operator==(const Optional<T> &X, const Optional<U> &Y) {
334 if (X && Y)
335 return *X == *Y;
336 return X.hasValue() == Y.hasValue();
337}
338
339template <typename T, typename U>
340constexpr bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
341 return !(X == Y);
342}
343
344template <typename T, typename U>
345constexpr bool operator<(const Optional<T> &X, const Optional<U> &Y) {
346 if (X && Y)
347 return *X < *Y;
348 return X.hasValue() < Y.hasValue();
349}
350
351template <typename T, typename U>
352constexpr bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
353 return !(Y < X);
354}
355
356template <typename T, typename U>
357constexpr bool operator>(const Optional<T> &X, const Optional<U> &Y) {
358 return Y < X;
359}
360
361template <typename T, typename U>
362constexpr bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
363 return !(X < Y);
364}
365
366template <typename T>
367constexpr bool operator==(const Optional<T> &X, NoneType) {
368 return !X;
369}
370
371template <typename T>
372constexpr bool operator==(NoneType, const Optional<T> &X) {
373 return X == None;
374}
375
376template <typename T>
377constexpr bool operator!=(const Optional<T> &X, NoneType) {
378 return !(X == None);
379}
380
381template <typename T>
382constexpr bool operator!=(NoneType, const Optional<T> &X) {
383 return X != None;
384}
385
386template <typename T> constexpr bool operator<(const Optional<T> &, NoneType) {
387 return false;
388}
389
390template <typename T> constexpr bool operator<(NoneType, const Optional<T> &X) {
391 return X.hasValue();
392}
393
394template <typename T>
395constexpr bool operator<=(const Optional<T> &X, NoneType) {
396 return !(None < X);
397}
398
399template <typename T>
400constexpr bool operator<=(NoneType, const Optional<T> &X) {
401 return !(X < None);
402}
403
404template <typename T> constexpr bool operator>(const Optional<T> &X, NoneType) {
405 return None < X;
406}
407
408template <typename T> constexpr bool operator>(NoneType, const Optional<T> &X) {
409 return X < None;
410}
411
412template <typename T>
413constexpr bool operator>=(const Optional<T> &X, NoneType) {
414 return None <= X;
415}
416
417template <typename T>
418constexpr bool operator>=(NoneType, const Optional<T> &X) {
419 return X <= None;
420}
421
422template <typename T>
423constexpr bool operator==(const Optional<T> &X, const T &Y) {
424 return X && *X == Y;
425}
426
427template <typename T>
428constexpr bool operator==(const T &X, const Optional<T> &Y) {
429 return Y && X == *Y;
430}
431
432template <typename T>
433constexpr bool operator!=(const Optional<T> &X, const T &Y) {
434 return !(X == Y);
435}
436
437template <typename T>
438constexpr bool operator!=(const T &X, const Optional<T> &Y) {
439 return !(X == Y);
440}
441
442template <typename T>
443constexpr bool operator<(const Optional<T> &X, const T &Y) {
444 return !X || *X < Y;
445}
446
447template <typename T>
448constexpr bool operator<(const T &X, const Optional<T> &Y) {
449 return Y && X < *Y;
450}
451
452template <typename T>
453constexpr bool operator<=(const Optional<T> &X, const T &Y) {
454 return !(Y < X);
455}
456
457template <typename T>
458constexpr bool operator<=(const T &X, const Optional<T> &Y) {
459 return !(Y < X);
460}
461
462template <typename T>
463constexpr bool operator>(const Optional<T> &X, const T &Y) {
464 return Y < X;
465}
466
467template <typename T>
468constexpr bool operator>(const T &X, const Optional<T> &Y) {
469 return Y < X;
470}
471
472template <typename T>
473constexpr bool operator>=(const Optional<T> &X, const T &Y) {
474 return !(X < Y);
475}
476
477template <typename T>
478constexpr bool operator>=(const T &X, const Optional<T> &Y) {
479 return !(X < Y);
480}
481
482raw_ostream &operator<<(raw_ostream &OS, NoneType);
483
484template <typename T, typename = decltype(std::declval<raw_ostream &>()
485 << std::declval<const T &>())>
486raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
487 if (O)
488 OS << *O;
489 else
490 OS << None;
491 return OS;
492}
493
494} // end namespace llvm
495
496#endif // LLVM_ADT_OPTIONAL_H