Bug Summary

File: llvm/include/llvm/IR/IRBuilder.h
Warning: line 1652, column 28
Called C++ object pointer is null
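This checker fires when a pointer that may be null on some path is used as the implicit object of a member call. Below is a minimal, hypothetical sketch of that pattern in LLVM-style code (illustrative names only, not the actual path reported above in IRBuilder.h):

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical illustration only: dyn_cast<> returns nullptr when V is not a
// LoadInst, so the unguarded member call below is the kind of path the
// analyzer reports as "Called C++ object pointer is null".
static unsigned addressSpaceUnchecked(Value *V) {
  auto *LI = dyn_cast<LoadInst>(V);    // may be nullptr
  return LI->getPointerAddressSpace(); // member call on a possibly-null pointer
}

// Guarded variant that the analyzer accepts.
static unsigned addressSpaceChecked(Value *V) {
  if (auto *LI = dyn_cast<LoadInst>(V))
    return LI->getPointerAddressSpace();
  return 0;
}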

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name OMPIRBuilder.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Frontend/OpenMP -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Frontend/OpenMP -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Frontend/OpenMP -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
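The cc1 command above is the exact invocation the analyzer recorded for this translation unit. As a rough, hand-written sketch (the include paths, language standard, and analyzer version are assumptions and must match the actual checkout), the same checkers can be re-run on the file directly with something like:

clang++ --analyze \
    -Xclang -analyzer-checker=core,nullability.NullPassedToNonnull,nullability.NullReturnedFromNonnull \
    -std=c++14 -I llvm/include -I build-llvm/include \
    llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp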

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp

1//===- OpenMPIRBuilder.cpp - Builder for LLVM-IR for OpenMP directives ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// This file implements the OpenMPIRBuilder class, which is used as a
11/// convenient way to create LLVM instructions for OpenMP directives.
12///
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
16
17#include "llvm/ADT/StringRef.h"
18#include "llvm/ADT/Triple.h"
19#include "llvm/IR/CFG.h"
20#include "llvm/IR/DebugInfo.h"
21#include "llvm/IR/IRBuilder.h"
22#include "llvm/IR/MDBuilder.h"
23#include "llvm/IR/Value.h"
24#include "llvm/Support/CommandLine.h"
25#include "llvm/Support/Error.h"
26#include "llvm/Transforms/Utils/BasicBlockUtils.h"
27#include "llvm/Transforms/Utils/CodeExtractor.h"
28
29#include <sstream>
30
31#define DEBUG_TYPE "openmp-ir-builder"
32
33using namespace llvm;
34using namespace omp;
35
36static cl::opt<bool>
37 OptimisticAttributes("openmp-ir-builder-optimistic-attributes", cl::Hidden,
38 cl::desc("Use optimistic attributes describing "
39 "'as-if' properties of runtime calls."),
40 cl::init(false));
41
42void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
43 LLVMContext &Ctx = Fn.getContext();
44
45 // Get the function's current attributes.
46 auto Attrs = Fn.getAttributes();
47 auto FnAttrs = Attrs.getFnAttrs();
48 auto RetAttrs = Attrs.getRetAttrs();
49 SmallVector<AttributeSet, 4> ArgAttrs;
50 for (size_t ArgNo = 0; ArgNo < Fn.arg_size(); ++ArgNo)
51 ArgAttrs.emplace_back(Attrs.getParamAttrs(ArgNo));
52
53#define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
54#include "llvm/Frontend/OpenMP/OMPKinds.def"
55
56 // Add attributes to the function declaration.
57 switch (FnID) {
58#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets) \
59 case Enum: \
60 FnAttrs = FnAttrs.addAttributes(Ctx, FnAttrSet); \
61 RetAttrs = RetAttrs.addAttributes(Ctx, RetAttrSet); \
62 for (size_t ArgNo = 0; ArgNo < ArgAttrSets.size(); ++ArgNo) \
63 ArgAttrs[ArgNo] = \
64 ArgAttrs[ArgNo].addAttributes(Ctx, ArgAttrSets[ArgNo]); \
65 Fn.setAttributes(AttributeList::get(Ctx, FnAttrs, RetAttrs, ArgAttrs)); \
66 break;
67#include "llvm/Frontend/OpenMP/OMPKinds.def"
68 default:
69 // Attributes are optional.
70 break;
71 }
72}
73
74FunctionCallee
75OpenMPIRBuilder::getOrCreateRuntimeFunction(Module &M, RuntimeFunction FnID) {
76 FunctionType *FnTy = nullptr;
77 Function *Fn = nullptr;
78
79 // Try to find the declaration in the module first.
80 switch (FnID) {
81#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) \
82 case Enum: \
83 FnTy = FunctionType::get(ReturnType, ArrayRef<Type *>{__VA_ARGS__}, \
84 IsVarArg); \
85 Fn = M.getFunction(Str); \
86 break;
87#include "llvm/Frontend/OpenMP/OMPKinds.def"
88 }
89
90 if (!Fn) {
91 // Create a new declaration if we need one.
92 switch (FnID) {
93#define OMP_RTL(Enum, Str, ...) \
94 case Enum: \
95 Fn = Function::Create(FnTy, GlobalValue::ExternalLinkage, Str, M); \
96 break;
97#include "llvm/Frontend/OpenMP/OMPKinds.def"
98 }
99
100 // Add information if the runtime function takes a callback function
101 if (FnID == OMPRTL___kmpc_fork_call || FnID == OMPRTL___kmpc_fork_teams) {
102 if (!Fn->hasMetadata(LLVMContext::MD_callback)) {
103 LLVMContext &Ctx = Fn->getContext();
104 MDBuilder MDB(Ctx);
105 // Annotate the callback behavior of the runtime function:
106 // - The callback callee is argument number 2 (microtask).
107 // - The first two arguments of the callback callee are unknown (-1).
108 // - All variadic arguments to the runtime function are passed to the
109 // callback callee.
110 Fn->addMetadata(
111 LLVMContext::MD_callback,
112 *MDNode::get(Ctx, {MDB.createCallbackEncoding(
113 2, {-1, -1}, /* VarArgsArePassed */ true)}));
114 }
115 }
116
117 LLVM_DEBUG(dbgs() << "Created OpenMP runtime function " << Fn->getName()
118 << " with type " << *Fn->getFunctionType() << "\n");
119 addAttributes(FnID, *Fn);
120
121 } else {
122 LLVM_DEBUG(dbgs() << "Found OpenMP runtime function " << Fn->getName()
123 << " with type " << *Fn->getFunctionType() << "\n");
124 }
125
126 assert(Fn && "Failed to create OpenMP runtime function");
127
128 // Cast the function to the expected type if necessary
129 Constant *C = ConstantExpr::getBitCast(Fn, FnTy->getPointerTo());
130 return {FnTy, C};
131}
132
133Function *OpenMPIRBuilder::getOrCreateRuntimeFunctionPtr(RuntimeFunction FnID) {
134 FunctionCallee RTLFn = getOrCreateRuntimeFunction(M, FnID);
135 auto *Fn = dyn_cast<llvm::Function>(RTLFn.getCallee());
136 assert(Fn && "Failed to create OpenMP runtime function pointer");
137 return Fn;
138}
139
140void OpenMPIRBuilder::initialize() { initializeTypes(M); }
141
142void OpenMPIRBuilder::finalize(Function *Fn, bool AllowExtractorSinking) {
143 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
144 SmallVector<BasicBlock *, 32> Blocks;
145 SmallVector<OutlineInfo, 16> DeferredOutlines;
146 for (OutlineInfo &OI : OutlineInfos) {
147 // Skip functions that have not finalized yet; may happen with nested
148 // function generation.
149 if (Fn && OI.getFunction() != Fn) {
150 DeferredOutlines.push_back(OI);
151 continue;
152 }
153
154 ParallelRegionBlockSet.clear();
155 Blocks.clear();
156 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
157
158 Function *OuterFn = OI.getFunction();
159 CodeExtractorAnalysisCache CEAC(*OuterFn);
160 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
161 /* AggregateArgs */ false,
162 /* BlockFrequencyInfo */ nullptr,
163 /* BranchProbabilityInfo */ nullptr,
164 /* AssumptionCache */ nullptr,
165 /* AllowVarArgs */ true,
166 /* AllowAlloca */ true,
167 /* Suffix */ ".omp_par");
168
169 LLVM_DEBUG(dbgs() << "Before outlining: " << *OuterFn << "\n");
170 LLVM_DEBUG(dbgs() << "Entry " << OI.EntryBB->getName()
171 << " Exit: " << OI.ExitBB->getName() << "\n");
172 assert(Extractor.isEligible() &&
173 "Expected OpenMP outlining to be possible!");
174
175 Function *OutlinedFn = Extractor.extractCodeRegion(CEAC);
176
177 LLVM_DEBUG(dbgs() << "After outlining: " << *OuterFn << "\n");
178 LLVM_DEBUG(dbgs() << " Outlined function: " << *OutlinedFn << "\n");
179 assert(OutlinedFn->getReturnType()->isVoidTy() &&
180 "OpenMP outlined functions should not return a value!");
181
182 // For compatibility with the clang CG we move the outlined function after the
183 // one with the parallel region.
184 OutlinedFn->removeFromParent();
185 M.getFunctionList().insertAfter(OuterFn->getIterator(), OutlinedFn);
186
187 // Remove the artificial entry introduced by the extractor right away, we
188 // made our own entry block after all.
189 {
190 BasicBlock &ArtificialEntry = OutlinedFn->getEntryBlock();
191 assert(ArtificialEntry.getUniqueSuccessor() == OI.EntryBB);
192 assert(OI.EntryBB->getUniquePredecessor() == &ArtificialEntry);
193 if (AllowExtractorSinking) {
194 // Move instructions from the to-be-deleted ArtificialEntry to the entry
195 // basic block of the parallel region. CodeExtractor may have sunk
196 // allocas/bitcasts for values that are solely used in the outlined
197 // region and do not escape.
198 assert(!ArtificialEntry.empty() &&
199 "Expected instructions to sink in the outlined region");
200 for (BasicBlock::iterator It = ArtificialEntry.begin(),
201 End = ArtificialEntry.end();
202 It != End;) {
203 Instruction &I = *It;
204 It++;
205
206 if (I.isTerminator())
207 continue;
208
209 I.moveBefore(*OI.EntryBB, OI.EntryBB->getFirstInsertionPt());
210 }
211 }
212 OI.EntryBB->moveBefore(&ArtificialEntry);
213 ArtificialEntry.eraseFromParent();
214 }
215 assert(&OutlinedFn->getEntryBlock() == OI.EntryBB);
216 assert(OutlinedFn && OutlinedFn->getNumUses() == 1);
217
218 // Run a user callback, e.g. to add attributes.
219 if (OI.PostOutlineCB)
220 OI.PostOutlineCB(*OutlinedFn);
221 }
222
223 // Remove work items that have been completed.
224 OutlineInfos = std::move(DeferredOutlines);
225}
226
227OpenMPIRBuilder::~OpenMPIRBuilder() {
228 assert(OutlineInfos.empty() && "There must be no outstanding outlinings");
229}
230
231Value *OpenMPIRBuilder::getOrCreateIdent(Constant *SrcLocStr,
232 IdentFlag LocFlags,
233 unsigned Reserve2Flags) {
234 // Enable "C-mode".
235 LocFlags |= OMP_IDENT_FLAG_KMPC;
236
237 Value *&Ident =
238 IdentMap[{SrcLocStr, uint64_t(LocFlags) << 31 | Reserve2Flags}];
239 if (!Ident) {
240 Constant *I32Null = ConstantInt::getNullValue(Int32);
241 Constant *IdentData[] = {
242 I32Null, ConstantInt::get(Int32, uint32_t(LocFlags)),
243 ConstantInt::get(Int32, Reserve2Flags), I32Null, SrcLocStr};
244 Constant *Initializer = ConstantStruct::get(
245 cast<StructType>(IdentPtr->getPointerElementType()), IdentData);
246
247 // Look for existing encoding of the location + flags, not needed but
248 // minimizes the difference to the existing solution while we transition.
249 for (GlobalVariable &GV : M.getGlobalList())
250 if (GV.getType() == IdentPtr && GV.hasInitializer())
251 if (GV.getInitializer() == Initializer)
252 return Ident = &GV;
253
254 auto *GV = new GlobalVariable(M, IdentPtr->getPointerElementType(),
255 /* isConstant = */ true,
256 GlobalValue::PrivateLinkage, Initializer);
257 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
258 GV->setAlignment(Align(8));
259 Ident = GV;
260 }
261 return Builder.CreatePointerCast(Ident, IdentPtr);
262}
263
264Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef LocStr) {
265 Constant *&SrcLocStr = SrcLocStrMap[LocStr];
266 if (!SrcLocStr) {
267 Constant *Initializer =
268 ConstantDataArray::getString(M.getContext(), LocStr);
269
270 // Look for existing encoding of the location, not needed but minimizes the
271 // difference to the existing solution while we transition.
272 for (GlobalVariable &GV : M.getGlobalList())
273 if (GV.isConstant() && GV.hasInitializer() &&
274 GV.getInitializer() == Initializer)
275 return SrcLocStr = ConstantExpr::getPointerCast(&GV, Int8Ptr);
276
277 SrcLocStr = Builder.CreateGlobalStringPtr(LocStr, /* Name */ "",
278 /* AddressSpace */ 0, &M);
279 }
280 return SrcLocStr;
281}
282
283Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(StringRef FunctionName,
284 StringRef FileName,
285 unsigned Line,
286 unsigned Column) {
287 SmallString<128> Buffer;
288 Buffer.push_back(';');
289 Buffer.append(FileName);
290 Buffer.push_back(';');
291 Buffer.append(FunctionName);
292 Buffer.push_back(';');
293 Buffer.append(std::to_string(Line));
294 Buffer.push_back(';');
295 Buffer.append(std::to_string(Column));
296 Buffer.push_back(';');
297 Buffer.push_back(';');
298 return getOrCreateSrcLocStr(Buffer.str());
299}
300
301Constant *OpenMPIRBuilder::getOrCreateDefaultSrcLocStr() {
302 return getOrCreateSrcLocStr(";unknown;unknown;0;0;;");
303}
304
305Constant *OpenMPIRBuilder::getOrCreateSrcLocStr(DebugLoc DL, Function *F) {
306 DILocation *DIL = DL.get();
307 if (!DIL)
308 return getOrCreateDefaultSrcLocStr();
309 StringRef FileName = M.getName();
310 if (DIFile *DIF = DIL->getFile())
311 if (Optional<StringRef> Source = DIF->getSource())
312 FileName = *Source;
313 StringRef Function = DIL->getScope()->getSubprogram()->getName();
314 if (Function.empty() && F)
315 Function = F->getName();
316 return getOrCreateSrcLocStr(Function, FileName, DIL->getLine(),
317 DIL->getColumn());
318}
319
320Constant *
321OpenMPIRBuilder::getOrCreateSrcLocStr(const LocationDescription &Loc) {
322 return getOrCreateSrcLocStr(Loc.DL, Loc.IP.getBlock()->getParent());
323}
324
325Value *OpenMPIRBuilder::getOrCreateThreadID(Value *Ident) {
326 return Builder.CreateCall(
327 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num), Ident,
328 "omp_global_thread_num");
329}
330
331OpenMPIRBuilder::InsertPointTy
332OpenMPIRBuilder::createBarrier(const LocationDescription &Loc, Directive DK,
333 bool ForceSimpleCall, bool CheckCancelFlag) {
334 if (!updateToLocation(Loc))
335 return Loc.IP;
336 return emitBarrierImpl(Loc, DK, ForceSimpleCall, CheckCancelFlag);
337}
338
339OpenMPIRBuilder::InsertPointTy
340OpenMPIRBuilder::emitBarrierImpl(const LocationDescription &Loc, Directive Kind,
341 bool ForceSimpleCall, bool CheckCancelFlag) {
342 // Build call __kmpc_cancel_barrier(loc, thread_id) or
343 // __kmpc_barrier(loc, thread_id);
344
345 IdentFlag BarrierLocFlags;
346 switch (Kind) {
347 case OMPD_for:
348 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_FOR;
349 break;
350 case OMPD_sections:
351 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SECTIONS;
352 break;
353 case OMPD_single:
354 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL_SINGLE;
355 break;
356 case OMPD_barrier:
357 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_EXPL;
358 break;
359 default:
360 BarrierLocFlags = OMP_IDENT_FLAG_BARRIER_IMPL;
361 break;
362 }
363
364 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
365 Value *Args[] = {getOrCreateIdent(SrcLocStr, BarrierLocFlags),
366 getOrCreateThreadID(getOrCreateIdent(SrcLocStr))};
367
368 // If we are in a cancellable parallel region, barriers are cancellation
369 // points.
370 // TODO: Check why we would force simple calls or to ignore the cancel flag.
371 bool UseCancelBarrier =
372 !ForceSimpleCall && isLastFinalizationInfoCancellable(OMPD_parallel);
373
374 Value *Result =
375 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(
376 UseCancelBarrier ? OMPRTL___kmpc_cancel_barrier
377 : OMPRTL___kmpc_barrier),
378 Args);
379
380 if (UseCancelBarrier && CheckCancelFlag)
381 emitCancelationCheckImpl(Result, OMPD_parallel);
382
383 return Builder.saveIP();
384}
385
386OpenMPIRBuilder::InsertPointTy
387OpenMPIRBuilder::createCancel(const LocationDescription &Loc,
388 Value *IfCondition,
389 omp::Directive CanceledDirective) {
390 if (!updateToLocation(Loc))
391 return Loc.IP;
392
393 // LLVM utilities like blocks with terminators.
394 auto *UI = Builder.CreateUnreachable();
395
396 Instruction *ThenTI = UI, *ElseTI = nullptr;
397 if (IfCondition)
398 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
399 Builder.SetInsertPoint(ThenTI);
400
401 Value *CancelKind = nullptr;
402 switch (CanceledDirective) {
403#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value) \
404 case DirectiveEnum: \
405 CancelKind = Builder.getInt32(Value); \
406 break;
407#include "llvm/Frontend/OpenMP/OMPKinds.def"
408 default:
409 llvm_unreachable("Unknown cancel kind!");
410 }
411
412 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
413 Value *Ident = getOrCreateIdent(SrcLocStr);
414 Value *Args[] = {Ident, getOrCreateThreadID(Ident), CancelKind};
415 Value *Result = Builder.CreateCall(
416 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_cancel), Args);
417 auto ExitCB = [this, CanceledDirective, Loc](InsertPointTy IP) {
418 if (CanceledDirective == OMPD_parallel) {
419 IRBuilder<>::InsertPointGuard IPG(Builder);
420 Builder.restoreIP(IP);
421 createBarrier(LocationDescription(Builder.saveIP(), Loc.DL),
422 omp::Directive::OMPD_unknown, /* ForceSimpleCall */ false,
423 /* CheckCancelFlag */ false);
424 }
425 };
426
427 // The actual cancel logic is shared with others, e.g., cancel_barriers.
428 emitCancelationCheckImpl(Result, CanceledDirective, ExitCB);
429
430 // Update the insertion point and remove the terminator we introduced.
431 Builder.SetInsertPoint(UI->getParent());
432 UI->eraseFromParent();
433
434 return Builder.saveIP();
435}
436
437void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
438 omp::Directive CanceledDirective,
439 FinalizeCallbackTy ExitCB) {
440 assert(isLastFinalizationInfoCancellable(CanceledDirective) &&
441 "Unexpected cancellation!");
442
443 // For a cancel barrier we create two new blocks.
444 BasicBlock *BB = Builder.GetInsertBlock();
445 BasicBlock *NonCancellationBlock;
446 if (Builder.GetInsertPoint() == BB->end()) {
447 // TODO: This branch will not be needed once we moved to the
448 // OpenMPIRBuilder codegen completely.
449 NonCancellationBlock = BasicBlock::Create(
450 BB->getContext(), BB->getName() + ".cont", BB->getParent());
451 } else {
452 NonCancellationBlock = SplitBlock(BB, &*Builder.GetInsertPoint());
453 BB->getTerminator()->eraseFromParent();
454 Builder.SetInsertPoint(BB);
455 }
456 BasicBlock *CancellationBlock = BasicBlock::Create(
457 BB->getContext(), BB->getName() + ".cncl", BB->getParent());
458
459 // Jump to them based on the return value.
460 Value *Cmp = Builder.CreateIsNull(CancelFlag);
461 Builder.CreateCondBr(Cmp, NonCancellationBlock, CancellationBlock,
462 /* TODO weight */ nullptr, nullptr);
463
464 // From the cancellation block we finalize all variables and go to the
465 // post finalization block that is known to the FiniCB callback.
466 Builder.SetInsertPoint(CancellationBlock);
467 if (ExitCB)
468 ExitCB(Builder.saveIP());
469 auto &FI = FinalizationStack.back();
470 FI.FiniCB(Builder.saveIP());
471
472 // The continuation block is where code generation continues.
473 Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
474}
475
476IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
477 const LocationDescription &Loc, InsertPointTy OuterAllocaIP,
478 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
479 FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads,
480 omp::ProcBindKind ProcBind, bool IsCancellable) {
481 if (!updateToLocation(Loc))
482 return Loc.IP;
483
484 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
485 Value *Ident = getOrCreateIdent(SrcLocStr);
486 Value *ThreadID = getOrCreateThreadID(Ident);
487
488 if (NumThreads) {
489 // Build call __kmpc_push_num_threads(&Ident, global_tid, num_threads)
490 Value *Args[] = {
491 Ident, ThreadID,
492 Builder.CreateIntCast(NumThreads, Int32, /*isSigned*/ false)};
493 Builder.CreateCall(
494 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_num_threads), Args);
495 }
496
497 if (ProcBind != OMP_PROC_BIND_default) {
498 // Build call __kmpc_push_proc_bind(&Ident, global_tid, proc_bind)
499 Value *Args[] = {
500 Ident, ThreadID,
501 ConstantInt::get(Int32, unsigned(ProcBind), /*isSigned=*/true)};
502 Builder.CreateCall(
503 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_push_proc_bind), Args);
504 }
505
506 BasicBlock *InsertBB = Builder.GetInsertBlock();
507 Function *OuterFn = InsertBB->getParent();
508
509 // Save the outer alloca block because the insertion iterator may get
510 // invalidated and we still need this later.
511 BasicBlock *OuterAllocaBlock = OuterAllocaIP.getBlock();
512
513 // Vector to remember instructions we used only during the modeling but which
514 // we want to delete at the end.
515 SmallVector<Instruction *, 4> ToBeDeleted;
516
517 // Change the location to the outer alloca insertion point to create and
518 // initialize the allocas we pass into the parallel region.
519 Builder.restoreIP(OuterAllocaIP);
520 AllocaInst *TIDAddr = Builder.CreateAlloca(Int32, nullptr, "tid.addr");
521 AllocaInst *ZeroAddr = Builder.CreateAlloca(Int32, nullptr, "zero.addr");
522
523 // If there is an if condition we actually use the TIDAddr and ZeroAddr in the
524 // program, otherwise we only need them for modeling purposes to get the
525 // associated arguments in the outlined function. In the former case,
526 // initialize the allocas properly, in the latter case, delete them later.
527 if (IfCondition) {
528 Builder.CreateStore(Constant::getNullValue(Int32), TIDAddr);
529 Builder.CreateStore(Constant::getNullValue(Int32), ZeroAddr);
530 } else {
531 ToBeDeleted.push_back(TIDAddr);
532 ToBeDeleted.push_back(ZeroAddr);
533 }
534
535 // Create an artificial insertion point that will also ensure the blocks we
536 // are about to split are not degenerated.
537 auto *UI = new UnreachableInst(Builder.getContext(), InsertBB);
538
539 Instruction *ThenTI = UI, *ElseTI = nullptr;
540 if (IfCondition)
541 SplitBlockAndInsertIfThenElse(IfCondition, UI, &ThenTI, &ElseTI);
542
543 BasicBlock *ThenBB = ThenTI->getParent();
544 BasicBlock *PRegEntryBB = ThenBB->splitBasicBlock(ThenTI, "omp.par.entry");
545 BasicBlock *PRegBodyBB =
546 PRegEntryBB->splitBasicBlock(ThenTI, "omp.par.region");
547 BasicBlock *PRegPreFiniBB =
548 PRegBodyBB->splitBasicBlock(ThenTI, "omp.par.pre_finalize");
549 BasicBlock *PRegExitBB =
550 PRegPreFiniBB->splitBasicBlock(ThenTI, "omp.par.exit");
551
552 auto FiniCBWrapper = [&](InsertPointTy IP) {
553 // Hide "open-ended" blocks from the given FiniCB by setting the right jump
554 // target to the region exit block.
555 if (IP.getBlock()->end() == IP.getPoint()) {
556 IRBuilder<>::InsertPointGuard IPG(Builder);
557 Builder.restoreIP(IP);
558 Instruction *I = Builder.CreateBr(PRegExitBB);
559 IP = InsertPointTy(I->getParent(), I->getIterator());
560 }
561 assert(IP.getBlock()->getTerminator()->getNumSuccessors() == 1 &&
562 IP.getBlock()->getTerminator()->getSuccessor(0) == PRegExitBB &&
563 "Unexpected insertion point for finalization call!");
564 return FiniCB(IP);
565 };
566
567 FinalizationStack.push_back({FiniCBWrapper, OMPD_parallel, IsCancellable});
568
569 // Generate the privatization allocas in the block that will become the entry
570 // of the outlined function.
571 Builder.SetInsertPoint(PRegEntryBB->getTerminator());
572 InsertPointTy InnerAllocaIP = Builder.saveIP();
573
574 AllocaInst *PrivTIDAddr =
575 Builder.CreateAlloca(Int32, nullptr, "tid.addr.local");
576 Instruction *PrivTID = Builder.CreateLoad(Int32, PrivTIDAddr, "tid");
577
578 // Add some fake uses for OpenMP provided arguments.
579 ToBeDeleted.push_back(Builder.CreateLoad(Int32, TIDAddr, "tid.addr.use"));
580 Instruction *ZeroAddrUse = Builder.CreateLoad(Int32, ZeroAddr,
581 "zero.addr.use");
582 ToBeDeleted.push_back(ZeroAddrUse);
583
584 // ThenBB
585 // |
586 // V
587 // PRegionEntryBB <- Privatization allocas are placed here.
588 // |
589 // V
590 // PRegionBodyBB <- BodyGen is invoked here.
591 // |
592 // V
593 // PRegPreFiniBB <- The block we will start finalization from.
594 // |
595 // V
596 // PRegionExitBB <- A common exit to simplify block collection.
597 //
598
599 LLVM_DEBUG(dbgs() << "Before body codegen: " << *OuterFn << "\n");
600
601 // Let the caller create the body.
602 assert(BodyGenCB && "Expected body generation callback!");
603 InsertPointTy CodeGenIP(PRegBodyBB, PRegBodyBB->begin());
604 BodyGenCB(InnerAllocaIP, CodeGenIP, *PRegPreFiniBB);
605
606 LLVM_DEBUG(dbgs() << "After body codegen: " << *OuterFn << "\n");
607
608 FunctionCallee RTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_fork_call);
609 if (auto *F = dyn_cast<llvm::Function>(RTLFn.getCallee())) {
610 if (!F->hasMetadata(llvm::LLVMContext::MD_callback)) {
611 llvm::LLVMContext &Ctx = F->getContext();
612 MDBuilder MDB(Ctx);
613 // Annotate the callback behavior of the __kmpc_fork_call:
614 // - The callback callee is argument number 2 (microtask).
615 // - The first two arguments of the callback callee are unknown (-1).
616 // - All variadic arguments to the __kmpc_fork_call are passed to the
617 // callback callee.
618 F->addMetadata(
619 llvm::LLVMContext::MD_callback,
620 *llvm::MDNode::get(
621 Ctx, {MDB.createCallbackEncoding(2, {-1, -1},
622 /* VarArgsArePassed */ true)}));
623 }
624 }
625
626 OutlineInfo OI;
627 OI.PostOutlineCB = [=](Function &OutlinedFn) {
628 // Add some known attributes.
629 OutlinedFn.addParamAttr(0, Attribute::NoAlias);
630 OutlinedFn.addParamAttr(1, Attribute::NoAlias);
631 OutlinedFn.addFnAttr(Attribute::NoUnwind);
632 OutlinedFn.addFnAttr(Attribute::NoRecurse);
633
634 assert(OutlinedFn.arg_size() >= 2 &&
635 "Expected at least tid and bounded tid as arguments");
636 unsigned NumCapturedVars =
637 OutlinedFn.arg_size() - /* tid & bounded tid */ 2;
638
639 CallInst *CI = cast<CallInst>(OutlinedFn.user_back());
640 CI->getParent()->setName("omp_parallel");
641 Builder.SetInsertPoint(CI);
642
643 // Build call __kmpc_fork_call(Ident, n, microtask, var1, .., varn);
644 Value *ForkCallArgs[] = {
645 Ident, Builder.getInt32(NumCapturedVars),
646 Builder.CreateBitCast(&OutlinedFn, ParallelTaskPtr)};
647
648 SmallVector<Value *, 16> RealArgs;
649 RealArgs.append(std::begin(ForkCallArgs), std::end(ForkCallArgs));
650 RealArgs.append(CI->arg_begin() + /* tid & bound tid */ 2, CI->arg_end());
651
652 Builder.CreateCall(RTLFn, RealArgs);
653
654 LLVM_DEBUG(dbgs() << "With fork_call placed: "
655 << *Builder.GetInsertBlock()->getParent() << "\n");
656
657 InsertPointTy ExitIP(PRegExitBB, PRegExitBB->end());
658
659 // Initialize the local TID stack location with the argument value.
660 Builder.SetInsertPoint(PrivTID);
661 Function::arg_iterator OutlinedAI = OutlinedFn.arg_begin();
662 Builder.CreateStore(Builder.CreateLoad(Int32, OutlinedAI), PrivTIDAddr);
663
664 // If no "if" clause was present we do not need the call created during
665 // outlining, otherwise we reuse it in the serialized parallel region.
666 if (!ElseTI) {
667 CI->eraseFromParent();
668 } else {
669
670 // If an "if" clause was present we are now generating the serialized
671 // version into the "else" branch.
672 Builder.SetInsertPoint(ElseTI);
673
674 // Build calls __kmpc_serialized_parallel(&Ident, GTid);
675 Value *SerializedParallelCallArgs[] = {Ident, ThreadID};
676 Builder.CreateCall(
677 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_serialized_parallel),
678 SerializedParallelCallArgs);
679
680 // OutlinedFn(&GTid, &zero, CapturedStruct);
681 CI->removeFromParent();
682 Builder.Insert(CI);
683
684 // __kmpc_end_serialized_parallel(&Ident, GTid);
685 Value *EndArgs[] = {Ident, ThreadID};
686 Builder.CreateCall(
687 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_serialized_parallel),
688 EndArgs);
689
690 LLVM_DEBUG(dbgs() << "With serialized parallel region: "
691 << *Builder.GetInsertBlock()->getParent() << "\n");
692 }
693
694 for (Instruction *I : ToBeDeleted)
695 I->eraseFromParent();
696 };
697
698 // Adjust the finalization stack, verify the adjustment, and call the
699 // finalize function a last time to finalize values between the pre-fini
700 // block and the exit block if we left the parallel "the normal way".
701 auto FiniInfo = FinalizationStack.pop_back_val();
702 (void)FiniInfo;
703 assert(FiniInfo.DK == OMPD_parallel &&
704 "Unexpected finalization stack state!");
705
706 Instruction *PRegPreFiniTI = PRegPreFiniBB->getTerminator();
707
708 InsertPointTy PreFiniIP(PRegPreFiniBB, PRegPreFiniTI->getIterator());
709 FiniCB(PreFiniIP);
710
711 OI.EntryBB = PRegEntryBB;
712 OI.ExitBB = PRegExitBB;
713
714 SmallPtrSet<BasicBlock *, 32> ParallelRegionBlockSet;
715 SmallVector<BasicBlock *, 32> Blocks;
716 OI.collectBlocks(ParallelRegionBlockSet, Blocks);
717
718 // Ensure a single exit node for the outlined region by creating one.
719 // We might have multiple incoming edges to the exit now due to finalizations,
720 // e.g., cancel calls that cause the control flow to leave the region.
721 BasicBlock *PRegOutlinedExitBB = PRegExitBB;
722 PRegExitBB = SplitBlock(PRegExitBB, &*PRegExitBB->getFirstInsertionPt());
723 PRegOutlinedExitBB->setName("omp.par.outlined.exit");
724 Blocks.push_back(PRegOutlinedExitBB);
725
726 CodeExtractorAnalysisCache CEAC(*OuterFn);
727 CodeExtractor Extractor(Blocks, /* DominatorTree */ nullptr,
728 /* AggregateArgs */ false,
729 /* BlockFrequencyInfo */ nullptr,
730 /* BranchProbabilityInfo */ nullptr,
731 /* AssumptionCache */ nullptr,
732 /* AllowVarArgs */ true,
733 /* AllowAlloca */ true,
734 /* Suffix */ ".omp_par");
735
736 // Find inputs to, outputs from the code region.
737 BasicBlock *CommonExit = nullptr;
738 SetVector<Value *> Inputs, Outputs, SinkingCands, HoistingCands;
739 Extractor.findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
740 Extractor.findInputsOutputs(Inputs, Outputs, SinkingCands);
741
742 LLVM_DEBUG(dbgs() << "Before privatization: " << *OuterFn << "\n");
743
744 FunctionCallee TIDRTLFn =
745 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_global_thread_num);
746
747 auto PrivHelper = [&](Value &V) {
748 if (&V == TIDAddr || &V == ZeroAddr)
749 return;
750
751 SetVector<Use *> Uses;
752 for (Use &U : V.uses())
753 if (auto *UserI = dyn_cast<Instruction>(U.getUser()))
754 if (ParallelRegionBlockSet.count(UserI->getParent()))
755 Uses.insert(&U);
756
757 // __kmpc_fork_call expects extra arguments as pointers. If the input
758 // already has a pointer type, everything is fine. Otherwise, store the
759 // value onto stack and load it back inside the to-be-outlined region. This
760 // will ensure only the pointer will be passed to the function.
761 // FIXME: if there are more than 15 trailing arguments, they must be
762 // additionally packed in a struct.
763 Value *Inner = &V;
764 if (!V.getType()->isPointerTy()) {
765 IRBuilder<>::InsertPointGuard Guard(Builder);
766 LLVM_DEBUG(llvm::dbgs() << "Forwarding input as pointer: " << V << "\n");
767
768 Builder.restoreIP(OuterAllocaIP);
769 Value *Ptr =
770 Builder.CreateAlloca(V.getType(), nullptr, V.getName() + ".reloaded");
771
772 // Store to stack at end of the block that currently branches to the entry
773 // block of the to-be-outlined region.
774 Builder.SetInsertPoint(InsertBB,
775 InsertBB->getTerminator()->getIterator());
776 Builder.CreateStore(&V, Ptr);
777
778 // Load back next to allocations in the to-be-outlined region.
779 Builder.restoreIP(InnerAllocaIP);
780 Inner = Builder.CreateLoad(V.getType(), Ptr);
781 }
782
783 Value *ReplacementValue = nullptr;
784 CallInst *CI = dyn_cast<CallInst>(&V);
785 if (CI && CI->getCalledFunction() == TIDRTLFn.getCallee()) {
786 ReplacementValue = PrivTID;
787 } else {
788 Builder.restoreIP(
789 PrivCB(InnerAllocaIP, Builder.saveIP(), V, *Inner, ReplacementValue));
790 assert(ReplacementValue &&
791 "Expected copy/create callback to set replacement value!");
792 if (ReplacementValue == &V)
793 return;
794 }
795
796 for (Use *UPtr : Uses)
797 UPtr->set(ReplacementValue);
798 };
799
800 // Reset the inner alloca insertion as it will be used for loading the values
801 // wrapped into pointers before passing them into the to-be-outlined region.
802 // Configure it to insert immediately after the fake use of zero address so
803 // that they are available in the generated body and so that the
804 // OpenMP-related values (thread ID and zero address pointers) remain leading
805 // in the argument list.
806 InnerAllocaIP = IRBuilder<>::InsertPoint(
807 ZeroAddrUse->getParent(), ZeroAddrUse->getNextNode()->getIterator());
808
809 // Reset the outer alloca insertion point to the entry of the relevant block
810 // in case it was invalidated.
811 OuterAllocaIP = IRBuilder<>::InsertPoint(
812 OuterAllocaBlock, OuterAllocaBlock->getFirstInsertionPt());
813
814 for (Value *Input : Inputs) {
815 LLVM_DEBUG(dbgs() << "Captured input: " << *Input << "\n");
816 PrivHelper(*Input);
817 }
818 LLVM_DEBUG({
819 for (Value *Output : Outputs)
820 LLVM_DEBUG(dbgs() << "Captured output: " << *Output << "\n");
821 });
822 assert(Outputs.empty() &&
823 "OpenMP outlining should not produce live-out values!");
824
825 LLVM_DEBUG(dbgs() << "After privatization: " << *OuterFn << "\n");
826 LLVM_DEBUG({
827 for (auto *BB : Blocks)
828 dbgs() << " PBR: " << BB->getName() << "\n";
829 });
830
831 // Register the outlined info.
832 addOutlineInfo(std::move(OI));
833
834 InsertPointTy AfterIP(UI->getParent(), UI->getParent()->end());
835 UI->eraseFromParent();
836
837 return AfterIP;
838}
839
840void OpenMPIRBuilder::emitFlush(const LocationDescription &Loc) {
841 // Build call void __kmpc_flush(ident_t *loc)
842 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
843 Value *Args[] = {getOrCreateIdent(SrcLocStr)};
844
845 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_flush), Args);
846}
847
848void OpenMPIRBuilder::createFlush(const LocationDescription &Loc) {
849 if (!updateToLocation(Loc))
850 return;
851 emitFlush(Loc);
852}
853
854void OpenMPIRBuilder::emitTaskwaitImpl(const LocationDescription &Loc) {
855 // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
856 // global_tid);
857 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
858 Value *Ident = getOrCreateIdent(SrcLocStr);
859 Value *Args[] = {Ident, getOrCreateThreadID(Ident)};
860
861 // Ignore return result until untied tasks are supported.
862 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskwait),
863 Args);
864}
865
866void OpenMPIRBuilder::createTaskwait(const LocationDescription &Loc) {
867 if (!updateToLocation(Loc))
868 return;
869 emitTaskwaitImpl(Loc);
870}
871
872void OpenMPIRBuilder::emitTaskyieldImpl(const LocationDescription &Loc) {
873 // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
874 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
875 Value *Ident = getOrCreateIdent(SrcLocStr);
876 Constant *I32Null = ConstantInt::getNullValue(Int32);
877 Value *Args[] = {Ident, getOrCreateThreadID(Ident), I32Null};
878
879 Builder.CreateCall(getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_omp_taskyield),
880 Args);
881}
882
883void OpenMPIRBuilder::createTaskyield(const LocationDescription &Loc) {
884 if (!updateToLocation(Loc))
885 return;
886 emitTaskyieldImpl(Loc);
887}
888
889OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createSections(
890 const LocationDescription &Loc, InsertPointTy AllocaIP,
891 ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB,
892 FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait) {
893 if (!updateToLocation(Loc))
894 return Loc.IP;
895
896 auto FiniCBWrapper = [&](InsertPointTy IP) {
897 if (IP.getBlock()->end() != IP.getPoint())
898 return FiniCB(IP);
899 // This must be done otherwise any nested constructs using FinalizeOMPRegion
900 // will fail because that function requires the Finalization Basic Block to
901 // have a terminator, which is already removed by EmitOMPRegionBody.
902 // IP is currently at cancelation block.
903 // We need to backtrack to the condition block to fetch
904 // the exit block and create a branch from cancelation
905 // to exit block.
906 IRBuilder<>::InsertPointGuard IPG(Builder);
907 Builder.restoreIP(IP);
908 auto *CaseBB = IP.getBlock()->getSinglePredecessor();
909 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
910 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
911 Instruction *I = Builder.CreateBr(ExitBB);
912 IP = InsertPointTy(I->getParent(), I->getIterator());
913 return FiniCB(IP);
914 };
915
916 FinalizationStack.push_back({FiniCBWrapper, OMPD_sections, IsCancellable});
917
918 // Each section is emitted as a switch case
919 // Each finalization callback is handled from clang.EmitOMPSectionDirective()
920 // -> OMP.createSection() which generates the IR for each section
921 // Iterate through all sections and emit a switch construct:
922 // switch (IV) {
923 // case 0:
924 // <SectionStmt[0]>;
925 // break;
926 // ...
927 // case <NumSection> - 1:
928 // <SectionStmt[<NumSection> - 1]>;
929 // break;
930 // }
931 // ...
932 // section_loop.after:
933 // <FiniCB>;
934 auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, Value *IndVar) {
935 auto *CurFn = CodeGenIP.getBlock()->getParent();
936 auto *ForIncBB = CodeGenIP.getBlock()->getSingleSuccessor();
937 auto *ForExitBB = CodeGenIP.getBlock()
938 ->getSinglePredecessor()
939 ->getTerminator()
940 ->getSuccessor(1);
941 SwitchInst *SwitchStmt = Builder.CreateSwitch(IndVar, ForIncBB);
942 Builder.restoreIP(CodeGenIP);
943 unsigned CaseNumber = 0;
944 for (auto SectionCB : SectionCBs) {
945 auto *CaseBB = BasicBlock::Create(M.getContext(),
946 "omp_section_loop.body.case", CurFn);
947 SwitchStmt->addCase(Builder.getInt32(CaseNumber), CaseBB);
948 Builder.SetInsertPoint(CaseBB);
949 SectionCB(InsertPointTy(), Builder.saveIP(), *ForExitBB);
950 CaseNumber++;
951 }
952 // remove the existing terminator from body BB since there can be no
953 // terminators after switch/case
954 CodeGenIP.getBlock()->getTerminator()->eraseFromParent();
955 };
956 // Loop body ends here
957 // LowerBound, UpperBound, and Stride for createCanonicalLoop
958 Type *I32Ty = Type::getInt32Ty(M.getContext());
959 Value *LB = ConstantInt::get(I32Ty, 0);
960 Value *UB = ConstantInt::get(I32Ty, SectionCBs.size());
961 Value *ST = ConstantInt::get(I32Ty, 1);
962 llvm::CanonicalLoopInfo *LoopInfo = createCanonicalLoop(
963 Loc, LoopBodyGenCB, LB, UB, ST, true, false, AllocaIP, "section_loop");
964 InsertPointTy AfterIP =
965 applyStaticWorkshareLoop(Loc.DL, LoopInfo, AllocaIP, true);
966 BasicBlock *LoopAfterBB = AfterIP.getBlock();
967 Instruction *SplitPos = LoopAfterBB->getTerminator();
968 if (!isa_and_nonnull<BranchInst>(SplitPos))
969 SplitPos = new UnreachableInst(Builder.getContext(), LoopAfterBB);
970 // ExitBB after LoopAfterBB because LoopAfterBB is used for FinalizationCB,
971 // which requires a BB with branch
972 BasicBlock *ExitBB =
973 LoopAfterBB->splitBasicBlock(SplitPos, "omp_sections.end");
974 SplitPos->eraseFromParent();
975
976 // Apply the finalization callback in LoopAfterBB
977 auto FiniInfo = FinalizationStack.pop_back_val();
978 assert(FiniInfo.DK == OMPD_sections &&
979 "Unexpected finalization stack state!");
980 Builder.SetInsertPoint(LoopAfterBB->getTerminator());
981 FiniInfo.FiniCB(Builder.saveIP());
982 Builder.SetInsertPoint(ExitBB);
983
984 return Builder.saveIP();
985}
986
987OpenMPIRBuilder::InsertPointTy
988OpenMPIRBuilder::createSection(const LocationDescription &Loc,
989 BodyGenCallbackTy BodyGenCB,
990 FinalizeCallbackTy FiniCB) {
991 if (!updateToLocation(Loc))
992 return Loc.IP;
993
994 auto FiniCBWrapper = [&](InsertPointTy IP) {
995 if (IP.getBlock()->end() != IP.getPoint())
996 return FiniCB(IP);
997 // This must be done, otherwise any nested constructs using FinalizeOMPRegion
998 // will fail because that function requires the Finalization Basic Block to
999 // have a terminator, which is already removed by EmitOMPRegionBody.
1000 // IP is currently at the cancellation block.
1001 // We need to backtrack to the condition block to fetch
1002 // the exit block and create a branch from the cancellation
1003 // block to the exit block.
1004 IRBuilder<>::InsertPointGuard IPG(Builder);
1005 Builder.restoreIP(IP);
1006 auto *CaseBB = Loc.IP.getBlock();
1007 auto *CondBB = CaseBB->getSinglePredecessor()->getSinglePredecessor();
1008 auto *ExitBB = CondBB->getTerminator()->getSuccessor(1);
1009 Instruction *I = Builder.CreateBr(ExitBB);
1010 IP = InsertPointTy(I->getParent(), I->getIterator());
1011 return FiniCB(IP);
1012 };
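// [Editorial sketch, not part of OMPIRBuilder.cpp] The backtracking above
// assumes a CFG of roughly this shape around the cancellation point:
//
//   cond.bb --(successor 1)--> exit.bb
//   cond.bb --(successor 0)--> body.bb --> case.bb   (== Loc.IP's block)
//
// so CaseBB->getSinglePredecessor()->getSinglePredecessor() recovers cond.bb,
// whose second successor is used as the exit block to branch to.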
1013
1014 Directive OMPD = Directive::OMPD_sections;
1015 // Since we are using a finalization callback here, HasFinalize
1016 // and IsCancellable have to be true.
1017 return EmitOMPInlinedRegion(OMPD, nullptr, nullptr, BodyGenCB, FiniCBWrapper,
1018 /*Conditional*/ false, /*hasFinalize*/ true,
1019 /*IsCancellable*/ true);
1020}
1021
1022/// Create a function with a unique name and a "void (i8*, i8*)" signature in
1023/// the given module and return it.
1024Function *getFreshReductionFunc(Module &M) {
1025 Type *VoidTy = Type::getVoidTy(M.getContext());
1026 Type *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
1027 auto *FuncTy =
1028 FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy}, /* IsVarArg */ false);
1029 return Function::Create(FuncTy, GlobalVariable::InternalLinkage,
1030 M.getDataLayout().getDefaultGlobalsAddressSpace(),
1031 ".omp.reduction.func", &M);
1032}
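// [Editorial sketch, not part of OMPIRBuilder.cpp] The function created here
// receives its body later in createReductions (see below); conceptually it
// behaves like this C++ analogue, where ElemTy, NumReductions and reduce() are
// placeholders for the per-reduction element type, count and generator:
//
//   void omp_reduction_func(int8_t *LHSArray, int8_t *RHSArray) {
//     auto **LHS = reinterpret_cast<ElemTy **>(LHSArray);
//     auto **RHS = reinterpret_cast<ElemTy **>(RHSArray);
//     for (unsigned I = 0; I < NumReductions; ++I)
//       *LHS[I] = reduce(*LHS[I], *RHS[I]);
//   }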
1033
1034OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createReductions(
1035 const LocationDescription &Loc, InsertPointTy AllocaIP,
1036 ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait) {
1037 for (const ReductionInfo &RI : ReductionInfos) {
1038 (void)RI;
1039 assert(RI.Variable && "expected non-null variable");
1040 assert(RI.PrivateVariable && "expected non-null private variable");
1041 assert(RI.ReductionGen && "expected non-null reduction generator callback");
1042 assert(RI.Variable->getType() == RI.PrivateVariable->getType() &&
1043        "expected variables and their private equivalents to have the same "
1044        "type");
1045 assert(RI.Variable->getType()->isPointerTy() &&
1046        "expected variables to be pointers");
1047 }
1048
1049 if (!updateToLocation(Loc))
1050 return InsertPointTy();
1051
1052 BasicBlock *InsertBlock = Loc.IP.getBlock();
1053 BasicBlock *ContinuationBlock =
1054 InsertBlock->splitBasicBlock(Loc.IP.getPoint(), "reduce.finalize");
1055 InsertBlock->getTerminator()->eraseFromParent();
1056
1057 // Create and populate array of type-erased pointers to private reduction
1058 // values.
1059 unsigned NumReductions = ReductionInfos.size();
1060 Type *RedArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumReductions);
1061 Builder.restoreIP(AllocaIP);
1062 Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
1063
1064 Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
1065
1066 for (auto En : enumerate(ReductionInfos)) {
1067 unsigned Index = En.index();
1068 const ReductionInfo &RI = En.value();
1069 Value *RedArrayElemPtr = Builder.CreateConstInBoundsGEP2_64(
1070 RedArrayTy, RedArray, 0, Index, "red.array.elem." + Twine(Index));
1071 Value *Casted =
1072 Builder.CreateBitCast(RI.PrivateVariable, Builder.getInt8PtrTy(),
1073 "private.red.var." + Twine(Index) + ".casted");
1074 Builder.CreateStore(Casted, RedArrayElemPtr);
1075 }
1076
1077 // Emit a call to the runtime function that orchestrates the reduction.
1078 // Declare the reduction function in the process.
1079 Function *Func = Builder.GetInsertBlock()->getParent();
1080 Module *Module = Func->getParent();
1081 Value *RedArrayPtr =
1082 Builder.CreateBitCast(RedArray, Builder.getInt8PtrTy(), "red.array.ptr");
1083 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1084 bool CanGenerateAtomic =
1085 llvm::all_of(ReductionInfos, [](const ReductionInfo &RI) {
1086 return RI.AtomicReductionGen;
1087 });
1088 Value *Ident = getOrCreateIdent(
1089 SrcLocStr, CanGenerateAtomic ? IdentFlag::OMP_IDENT_FLAG_ATOMIC_REDUCE
1090 : IdentFlag(0));
1091 Value *ThreadId = getOrCreateThreadID(Ident);
1092 Constant *NumVariables = Builder.getInt32(NumReductions);
1093 const DataLayout &DL = Module->getDataLayout();
1094 unsigned RedArrayByteSize = DL.getTypeStoreSize(RedArrayTy);
1095 Constant *RedArraySize = Builder.getInt64(RedArrayByteSize);
1096 Function *ReductionFunc = getFreshReductionFunc(*Module);
1097 Value *Lock = getOMPCriticalRegionLock(".reduction");
1098 Function *ReduceFunc = getOrCreateRuntimeFunctionPtr(
1099 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_reduce_nowait
1100 : RuntimeFunction::OMPRTL___kmpc_reduce);
1101 CallInst *ReduceCall =
1102 Builder.CreateCall(ReduceFunc,
1103 {Ident, ThreadId, NumVariables, RedArraySize,
1104 RedArrayPtr, ReductionFunc, Lock},
1105 "reduce");
1106
1107 // Create final reduction entry blocks for the atomic and non-atomic cases.
1108 // Emit IR that dispatches control flow to one of the blocks based on whether
1109 // the reduction supports the atomic mode.
1110 BasicBlock *NonAtomicRedBlock =
1111 BasicBlock::Create(Module->getContext(), "reduce.switch.nonatomic", Func);
1112 BasicBlock *AtomicRedBlock =
1113 BasicBlock::Create(Module->getContext(), "reduce.switch.atomic", Func);
1114 SwitchInst *Switch =
1115 Builder.CreateSwitch(ReduceCall, ContinuationBlock, /* NumCases */ 2);
1116 Switch->addCase(Builder.getInt32(1), NonAtomicRedBlock);
1117 Switch->addCase(Builder.getInt32(2), AtomicRedBlock);
1118
1119 // Populate the non-atomic reduction using the elementwise reduction function.
1120 // This loads the elements from the global and private variables and reduces
1121 // them before storing the result back to the global variable.
1122 Builder.SetInsertPoint(NonAtomicRedBlock);
1123 for (auto En : enumerate(ReductionInfos)) {
1124 const ReductionInfo &RI = En.value();
1125 Type *ValueType = RI.getElementType();
1126 Value *RedValue = Builder.CreateLoad(ValueType, RI.Variable,
1127 "red.value." + Twine(En.index()));
1128 Value *PrivateRedValue =
1129 Builder.CreateLoad(ValueType, RI.PrivateVariable,
1130 "red.private.value." + Twine(En.index()));
1131 Value *Reduced;
1132 Builder.restoreIP(
1133 RI.ReductionGen(Builder.saveIP(), RedValue, PrivateRedValue, Reduced));
1134 if (!Builder.GetInsertBlock())
1135 return InsertPointTy();
1136 Builder.CreateStore(Reduced, RI.Variable);
1137 }
1138 Function *EndReduceFunc = getOrCreateRuntimeFunctionPtr(
1139 IsNoWait ? RuntimeFunction::OMPRTL___kmpc_end_reduce_nowait
1140 : RuntimeFunction::OMPRTL___kmpc_end_reduce);
1141 Builder.CreateCall(EndReduceFunc, {Ident, ThreadId, Lock});
1142 Builder.CreateBr(ContinuationBlock);
1143
1144 // Populate the atomic reduction using the atomic elementwise reduction
1145 // function. There are no loads/stores here because they will be happening
1146 // inside the atomic elementwise reduction.
1147 Builder.SetInsertPoint(AtomicRedBlock);
1148 if (CanGenerateAtomic) {
1149 for (const ReductionInfo &RI : ReductionInfos) {
1150 Builder.restoreIP(RI.AtomicReductionGen(Builder.saveIP(), RI.Variable,
1151 RI.PrivateVariable));
1152 if (!Builder.GetInsertBlock())
1153 return InsertPointTy();
1154 }
1155 Builder.CreateBr(ContinuationBlock);
1156 } else {
1157 Builder.CreateUnreachable();
1158 }
1159
1160 // Populate the outlined reduction function using the elementwise reduction
1161 // function. Partial values are extracted from the type-erased array of
1162 // pointers to private variables.
1163 BasicBlock *ReductionFuncBlock =
1164 BasicBlock::Create(Module->getContext(), "", ReductionFunc);
1165 Builder.SetInsertPoint(ReductionFuncBlock);
1166 Value *LHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(0),
1167 RedArrayTy->getPointerTo());
1168 Value *RHSArrayPtr = Builder.CreateBitCast(ReductionFunc->getArg(1),
1169 RedArrayTy->getPointerTo());
1170 for (auto En : enumerate(ReductionInfos)) {
1171 const ReductionInfo &RI = En.value();
1172 Value *LHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
1173 RedArrayTy, LHSArrayPtr, 0, En.index());
1174 Value *LHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), LHSI8PtrPtr);
1175 Value *LHSPtr = Builder.CreateBitCast(LHSI8Ptr, RI.Variable->getType());
1176 Value *LHS = Builder.CreateLoad(RI.getElementType(), LHSPtr);
1177 Value *RHSI8PtrPtr = Builder.CreateConstInBoundsGEP2_64(
1178 RedArrayTy, RHSArrayPtr, 0, En.index());
1179 Value *RHSI8Ptr = Builder.CreateLoad(Builder.getInt8PtrTy(), RHSI8PtrPtr);
1180 Value *RHSPtr =
1181 Builder.CreateBitCast(RHSI8Ptr, RI.PrivateVariable->getType());
1182 Value *RHS = Builder.CreateLoad(RI.getElementType(), RHSPtr);
1183 Value *Reduced;
1184 Builder.restoreIP(RI.ReductionGen(Builder.saveIP(), LHS, RHS, Reduced));
1185 if (!Builder.GetInsertBlock())
1186 return InsertPointTy();
1187 Builder.CreateStore(Reduced, LHSPtr);
1188 }
1189 Builder.CreateRetVoid();
1190
1191 Builder.SetInsertPoint(ContinuationBlock);
1192 return Builder.saveIP();
1193}
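// [Editorial sketch, not part of the source] The reduction entry emitted above
// is conceptually:
//   switch (__kmpc_reduce[_nowait](loc, tid, n, size, array, reduce_func, lock)) {
//   case 1: <elementwise reduce and store>; __kmpc_end_reduce[_nowait](...); break;
//   case 2: <atomic elementwise reduce>; break;  // only when every ReductionInfo
//                                                // provides AtomicReductionGen
//   default: break;                              // falls through to reduce.finalize
//   }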
1194
1195OpenMPIRBuilder::InsertPointTy
1196OpenMPIRBuilder::createMaster(const LocationDescription &Loc,
1197 BodyGenCallbackTy BodyGenCB,
1198 FinalizeCallbackTy FiniCB) {
1199
1200 if (!updateToLocation(Loc))
1201 return Loc.IP;
1202
1203 Directive OMPD = Directive::OMPD_master;
1204 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1205 Value *Ident = getOrCreateIdent(SrcLocStr);
1206 Value *ThreadId = getOrCreateThreadID(Ident);
1207 Value *Args[] = {Ident, ThreadId};
1208
1209 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_master);
1210 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1211
1212 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_master);
1213 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
1214
1215 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1216 /*Conditional*/ true, /*hasFinalize*/ true);
1217}
1218
1219OpenMPIRBuilder::InsertPointTy
1220OpenMPIRBuilder::createMasked(const LocationDescription &Loc,
1221 BodyGenCallbackTy BodyGenCB,
1222 FinalizeCallbackTy FiniCB, Value *Filter) {
1223 if (!updateToLocation(Loc))
1224 return Loc.IP;
1225
1226 Directive OMPD = Directive::OMPD_masked;
1227 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
1228 Value *Ident = getOrCreateIdent(SrcLocStr);
1229 Value *ThreadId = getOrCreateThreadID(Ident);
1230 Value *Args[] = {Ident, ThreadId, Filter};
1231 Value *ArgsEnd[] = {Ident, ThreadId};
1232
1233 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_masked);
1234 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
1235
1236 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_masked);
1237 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, ArgsEnd);
1238
1239 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
1240 /*Conditional*/ true, /*hasFinalize*/ true);
1241}
1242
1243CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton(
1244 DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore,
1245 BasicBlock *PostInsertBefore, const Twine &Name) {
1246 Module *M = F->getParent();
1247 LLVMContext &Ctx = M->getContext();
1248 Type *IndVarTy = TripCount->getType();
1249
1250 // Create the basic block structure.
1251 BasicBlock *Preheader =
1252 BasicBlock::Create(Ctx, "omp_" + Name + ".preheader", F, PreInsertBefore);
1253 BasicBlock *Header =
1254 BasicBlock::Create(Ctx, "omp_" + Name + ".header", F, PreInsertBefore);
1255 BasicBlock *Cond =
1256 BasicBlock::Create(Ctx, "omp_" + Name + ".cond", F, PreInsertBefore);
1257 BasicBlock *Body =
1258 BasicBlock::Create(Ctx, "omp_" + Name + ".body", F, PreInsertBefore);
1259 BasicBlock *Latch =
1260 BasicBlock::Create(Ctx, "omp_" + Name + ".inc", F, PostInsertBefore);
1261 BasicBlock *Exit =
1262 BasicBlock::Create(Ctx, "omp_" + Name + ".exit", F, PostInsertBefore);
1263 BasicBlock *After =
1264 BasicBlock::Create(Ctx, "omp_" + Name + ".after", F, PostInsertBefore);
1265
1266 // Use specified DebugLoc for new instructions.
1267 Builder.SetCurrentDebugLocation(DL);
1268
1269 Builder.SetInsertPoint(Preheader);
1270 Builder.CreateBr(Header);
1271
1272 Builder.SetInsertPoint(Header);
1273 PHINode *IndVarPHI = Builder.CreatePHI(IndVarTy, 2, "omp_" + Name + ".iv");
1274 IndVarPHI->addIncoming(ConstantInt::get(IndVarTy, 0), Preheader);
1275 Builder.CreateBr(Cond);
1276
1277 Builder.SetInsertPoint(Cond);
1278 Value *Cmp =
1279 Builder.CreateICmpULT(IndVarPHI, TripCount, "omp_" + Name + ".cmp");
1280 Builder.CreateCondBr(Cmp, Body, Exit);
1281
1282 Builder.SetInsertPoint(Body);
1283 Builder.CreateBr(Latch);
1284
1285 Builder.SetInsertPoint(Latch);
1286 Value *Next = Builder.CreateAdd(IndVarPHI, ConstantInt::get(IndVarTy, 1),
1287 "omp_" + Name + ".next", /*HasNUW=*/true);
1288 Builder.CreateBr(Header);
1289 IndVarPHI->addIncoming(Next, Latch);
1290
1291 Builder.SetInsertPoint(Exit);
1292 Builder.CreateBr(After);
1293
1294 // Remember and return the canonical control flow.
1295 LoopInfos.emplace_front();
1296 CanonicalLoopInfo *CL = &LoopInfos.front();
1297
1298 CL->Preheader = Preheader;
1299 CL->Header = Header;
1300 CL->Cond = Cond;
1301 CL->Body = Body;
1302 CL->Latch = Latch;
1303 CL->Exit = Exit;
1304 CL->After = After;
1305
1306#ifndef NDEBUG
1307 CL->assertOK();
1308#endif
1309 return CL;
1310}
1311
1312CanonicalLoopInfo *
1313OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc,
1314 LoopBodyGenCallbackTy BodyGenCB,
1315 Value *TripCount, const Twine &Name) {
1316 BasicBlock *BB = Loc.IP.getBlock();
1317 BasicBlock *NextBB = BB->getNextNode();
1318
1319 CanonicalLoopInfo *CL = createLoopSkeleton(Loc.DL, TripCount, BB->getParent(),
1320 NextBB, NextBB, Name);
1321 BasicBlock *After = CL->getAfter();
1322
1323 // If location is not set, don't connect the loop.
1324 if (updateToLocation(Loc)) {
1325 // Split the loop at the insertion point: Branch to the preheader and move
1326 // every following instruction to after the loop (the After BB). Also, the
1327 // new successor is the loop's after block.
1328 Builder.CreateBr(CL->Preheader);
1329 After->getInstList().splice(After->begin(), BB->getInstList(),
1330 Builder.GetInsertPoint(), BB->end());
1331 After->replaceSuccessorsPhiUsesWith(BB, After);
1332 }
1333
1334 // Emit the body content. We do it after connecting the loop to the CFG to
1335 // avoid the callback encountering degenerate BBs.
1336 BodyGenCB(CL->getBodyIP(), CL->getIndVar());
1337
1338#ifndef NDEBUG
1339 CL->assertOK();
1340#endif
1341 return CL;
1342}
1343
1344CanonicalLoopInfo *OpenMPIRBuilder::createCanonicalLoop(
1345 const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
1346 Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
1347 InsertPointTy ComputeIP, const Twine &Name) {
1348
1349 // Consider the following difficulties (assuming 8-bit signed integers):
1350 // * Adding \p Step to the loop counter which passes \p Stop may overflow:
1351 // DO I = 1, 100, 50
1352 // * A \p Step of INT_MIN cannot be normalized to a positive direction:
1353 // DO I = 100, 0, -128
1354
1355 // Start, Stop and Step must be of the same integer type.
1356 auto *IndVarTy = cast<IntegerType>(Start->getType());
1357 assert(IndVarTy == Stop->getType() && "Stop type mismatch");
1358 assert(IndVarTy == Step->getType() && "Step type mismatch");
1359
1360 LocationDescription ComputeLoc =
1361 ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc;
1362 updateToLocation(ComputeLoc);
1363
1364 ConstantInt *Zero = ConstantInt::get(IndVarTy, 0);
1365 ConstantInt *One = ConstantInt::get(IndVarTy, 1);
1366
1367 // Like Step, but always positive.
1368 Value *Incr = Step;
1369
1370 // Distance between Start and Stop; always positive.
1371 Value *Span;
1372
1373 // Condition for whether no iterations are executed at all, e.g. because
1374 // UB < LB.
1375 Value *ZeroCmp;
1376
1377 if (IsSigned) {
1378 // Ensure that increment is positive. If not, negate and invert LB and UB.
1379 Value *IsNeg = Builder.CreateICmpSLT(Step, Zero);
1380 Incr = Builder.CreateSelect(IsNeg, Builder.CreateNeg(Step), Step);
1381 Value *LB = Builder.CreateSelect(IsNeg, Stop, Start);
1382 Value *UB = Builder.CreateSelect(IsNeg, Start, Stop);
1383 Span = Builder.CreateSub(UB, LB, "", false, true);
1384 ZeroCmp = Builder.CreateICmp(
1385 InclusiveStop ? CmpInst::ICMP_SLT : CmpInst::ICMP_SLE, UB, LB);
1386 } else {
1387 Span = Builder.CreateSub(Stop, Start, "", true);
1388 ZeroCmp = Builder.CreateICmp(
1389 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Stop, Start);
1390 }
1391
1392 Value *CountIfLooping;
1393 if (InclusiveStop) {
1394 CountIfLooping = Builder.CreateAdd(Builder.CreateUDiv(Span, Incr), One);
1395 } else {
1396 // Avoid incrementing past stop since it could overflow.
1397 Value *CountIfTwo = Builder.CreateAdd(
1398 Builder.CreateUDiv(Builder.CreateSub(Span, One), Incr), One);
1399 Value *OneCmp = Builder.CreateICmp(
1400 InclusiveStop ? CmpInst::ICMP_ULT : CmpInst::ICMP_ULE, Span, Incr);
1401 CountIfLooping = Builder.CreateSelect(OneCmp, One, CountIfTwo);
1402 }
1403 Value *TripCount = Builder.CreateSelect(ZeroCmp, Zero, CountIfLooping,
1404 "omp_" + Name + ".tripcount");
1405
1406 auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) {
1407 Builder.restoreIP(CodeGenIP);
1408 Value *Span = Builder.CreateMul(IV, Step);
1409 Value *IndVar = Builder.CreateAdd(Span, Start);
1410 BodyGenCB(Builder.saveIP(), IndVar);
1411 };
1412 LocationDescription LoopLoc = ComputeIP.isSet() ? Loc.IP : Builder.saveIP();
1413 return createCanonicalLoop(LoopLoc, BodyGen, TripCount, Name);
1414}
1415
1416// Returns an LLVM function to call for initializing loop bounds using OpenMP
1417// static scheduling depending on `type`. Only i32 and i64 are supported by the
1418// runtime. Always interpret integers as unsigned similarly to
1419// CanonicalLoopInfo.
1420static FunctionCallee getKmpcForStaticInitForType(Type *Ty, Module &M,
1421 OpenMPIRBuilder &OMPBuilder) {
1422 unsigned Bitwidth = Ty->getIntegerBitWidth();
1423 if (Bitwidth == 32)
1424 return OMPBuilder.getOrCreateRuntimeFunction(
1425 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_4u);
1426 if (Bitwidth == 64)
1427 return OMPBuilder.getOrCreateRuntimeFunction(
1428 M, omp::RuntimeFunction::OMPRTL___kmpc_for_static_init_8u);
1429 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1430}
1431
1432// Sets the number of loop iterations to the given value. This value must be
1433// valid in the condition block (i.e., defined in the preheader) and is
1434// interpreted as an unsigned integer.
1435void setCanonicalLoopTripCount(CanonicalLoopInfo *CLI, Value *TripCount) {
1436 Instruction *CmpI = &CLI->getCond()->front();
1437 assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
1438 CmpI->setOperand(1, TripCount);
1439 CLI->assertOK();
1440}
1441
1442OpenMPIRBuilder::InsertPointTy
1443OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
1444 InsertPointTy AllocaIP,
1445 bool NeedsBarrier, Value *Chunk) {
1446 assert(CLI->isValid() && "Requires a valid canonical loop");
1447
1448 // Set up the source location value for OpenMP runtime.
1449 Builder.restoreIP(CLI->getPreheaderIP());
1450 Builder.SetCurrentDebugLocation(DL);
1451
1452 Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
1453 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1454
1455 // Declare useful OpenMP runtime functions.
1456 Value *IV = CLI->getIndVar();
1457 Type *IVTy = IV->getType();
1458 FunctionCallee StaticInit = getKmpcForStaticInitForType(IVTy, M, *this);
1459 FunctionCallee StaticFini =
1460 getOrCreateRuntimeFunction(M, omp::OMPRTL___kmpc_for_static_fini);
1461
1462 // Allocate space for computed loop bounds as expected by the "init" function.
1463 Builder.restoreIP(AllocaIP);
1464 Type *I32Type = Type::getInt32Ty(M.getContext());
1465 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1466 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1467 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1468 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1469
1470 // At the end of the preheader, prepare for calling the "init" function by
1471 // storing the current loop bounds into the allocated space. A canonical loop
1472 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1473 // and produces an inclusive upper bound.
1474 Builder.SetInsertPoint(CLI->getPreheader()->getTerminator());
1475 Constant *Zero = ConstantInt::get(IVTy, 0);
1476 Constant *One = ConstantInt::get(IVTy, 1);
1477 Builder.CreateStore(Zero, PLowerBound);
1478 Value *UpperBound = Builder.CreateSub(CLI->getTripCount(), One);
1479 Builder.CreateStore(UpperBound, PUpperBound);
1480 Builder.CreateStore(One, PStride);
1481
1482 // FIXME: schedule(static) is NOT the same as schedule(static,1)
1483 if (!Chunk)
1484 Chunk = One;
1485
1486 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1487
1488 Constant *SchedulingType =
1489 ConstantInt::get(I32Type, static_cast<int>(OMPScheduleType::Static));
1490
1491 // Call the "init" function and update the trip count of the loop with the
1492 // value it produced.
1493 Builder.CreateCall(StaticInit,
1494 {SrcLoc, ThreadNum, SchedulingType, PLastIter, PLowerBound,
1495 PUpperBound, PStride, One, Chunk});
1496 Value *LowerBound = Builder.CreateLoad(IVTy, PLowerBound);
1497 Value *InclusiveUpperBound = Builder.CreateLoad(IVTy, PUpperBound);
1498 Value *TripCountMinusOne = Builder.CreateSub(InclusiveUpperBound, LowerBound);
1499 Value *TripCount = Builder.CreateAdd(TripCountMinusOne, One);
1500 setCanonicalLoopTripCount(CLI, TripCount);
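// [Editorial example, assuming two threads and an original trip count of 10]
// __kmpc_for_static_init_4u might hand thread 0 the inclusive bounds lb=0,
// ub=4 and thread 1 lb=5, ub=9; each thread's local trip count then becomes
// ub - lb + 1 == 5, and below the induction variable is shifted by lb so the
// body still observes the original iteration numbers.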
1501
1502 // Update all uses of the induction variable except the one in the condition
1503 // block that compares it with the actual upper bound, and the increment in
1504 // the latch block.
1505 // TODO: this can eventually move to CanonicalLoopInfo or to a new
1506 // CanonicalLoopInfoUpdater interface.
1507 Builder.SetInsertPoint(CLI->getBody(), CLI->getBody()->getFirstInsertionPt());
1508 Value *UpdatedIV = Builder.CreateAdd(IV, LowerBound);
1509 IV->replaceUsesWithIf(UpdatedIV, [&](Use &U) {
1510 auto *Instr = dyn_cast<Instruction>(U.getUser());
1511 return !Instr ||
1512 (Instr->getParent() != CLI->getCond() &&
1513 Instr->getParent() != CLI->getLatch() && Instr != UpdatedIV);
1514 });
1515
1516 // In the "exit" block, call the "fini" function.
1517 Builder.SetInsertPoint(CLI->getExit(),
1518 CLI->getExit()->getTerminator()->getIterator());
1519 Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
1520
1521 // Add the barrier if requested.
1522 if (NeedsBarrier)
1523 createBarrier(LocationDescription(Builder.saveIP(), DL),
1524 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1525 /* CheckCancelFlag */ false);
1526
1527 InsertPointTy AfterIP = CLI->getAfterIP();
1528 CLI->invalidate();
1529
1530 return AfterIP;
1531}
1532
1533OpenMPIRBuilder::InsertPointTy
1534OpenMPIRBuilder::applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
1535 InsertPointTy AllocaIP, bool NeedsBarrier) {
1536 // Currently only supports static schedules.
1537 return applyStaticWorkshareLoop(DL, CLI, AllocaIP, NeedsBarrier);
1538}
1539
1540/// Returns an LLVM function to call for initializing loop bounds using OpenMP
1541/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1542/// the runtime. Always interpret integers as unsigned similarly to
1543/// CanonicalLoopInfo.
1544static FunctionCallee
1545getKmpcForDynamicInitForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1546 unsigned Bitwidth = Ty->getIntegerBitWidth();
1547 if (Bitwidth == 32)
1548 return OMPBuilder.getOrCreateRuntimeFunction(
1549 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_4u);
1550 if (Bitwidth == 64)
1551 return OMPBuilder.getOrCreateRuntimeFunction(
1552 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_init_8u);
1553 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1554}
1555
1556/// Returns an LLVM function to call for updating the next loop using OpenMP
1557/// dynamic scheduling depending on `type`. Only i32 and i64 are supported by
1558/// the runtime. Always interpret integers as unsigned similarly to
1559/// CanonicalLoopInfo.
1560static FunctionCallee
1561getKmpcForDynamicNextForType(Type *Ty, Module &M, OpenMPIRBuilder &OMPBuilder) {
1562 unsigned Bitwidth = Ty->getIntegerBitWidth();
1563 if (Bitwidth == 32)
1564 return OMPBuilder.getOrCreateRuntimeFunction(
1565 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_4u);
1566 if (Bitwidth == 64)
1567 return OMPBuilder.getOrCreateRuntimeFunction(
1568 M, omp::RuntimeFunction::OMPRTL___kmpc_dispatch_next_8u);
1569 llvm_unreachable("unknown OpenMP loop iterator bitwidth");
1570}
1571
1572OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
1573 DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
1574 OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk) {
1575 assert(CLI->isValid() && "Requires a valid canonical loop");
1576
1577 // Set up the source location value for OpenMP runtime.
1578 Builder.SetCurrentDebugLocation(DL);
1579
1580 Constant *SrcLocStr = getOrCreateSrcLocStr(DL);
1581 Value *SrcLoc = getOrCreateIdent(SrcLocStr);
1582
1583 // Declare useful OpenMP runtime functions.
1584 Value *IV = CLI->getIndVar();
1585 Type *IVTy = IV->getType();
1586 FunctionCallee DynamicInit = getKmpcForDynamicInitForType(IVTy, M, *this);
1587 FunctionCallee DynamicNext = getKmpcForDynamicNextForType(IVTy, M, *this);
1588
1589 // Allocate space for computed loop bounds as expected by the "init" function.
1590 Builder.restoreIP(AllocaIP);
1591 Type *I32Type = Type::getInt32Ty(M.getContext());
1592 Value *PLastIter = Builder.CreateAlloca(I32Type, nullptr, "p.lastiter");
1593 Value *PLowerBound = Builder.CreateAlloca(IVTy, nullptr, "p.lowerbound");
1594 Value *PUpperBound = Builder.CreateAlloca(IVTy, nullptr, "p.upperbound");
1595 Value *PStride = Builder.CreateAlloca(IVTy, nullptr, "p.stride");
1596
1597 // At the end of the preheader, prepare for calling the "init" function by
1598 // storing the current loop bounds into the allocated space. A canonical loop
1599 // always iterates from 0 to trip-count with step 1. Note that "init" expects
1600 // and produces an inclusive upper bound.
1601 BasicBlock *PreHeader = CLI->getPreheader();
1602 Builder.SetInsertPoint(PreHeader->getTerminator());
1603 Constant *One = ConstantInt::get(IVTy, 1);
1604 Builder.CreateStore(One, PLowerBound);
1605 Value *UpperBound = CLI->getTripCount();
1606 Builder.CreateStore(UpperBound, PUpperBound);
1607 Builder.CreateStore(One, PStride);
1608
1609 BasicBlock *Header = CLI->getHeader();
1610 BasicBlock *Exit = CLI->getExit();
1611 BasicBlock *Cond = CLI->getCond();
1612 InsertPointTy AfterIP = CLI->getAfterIP();
1613
1614 // The CLI will be "broken" in the code below, as the loop is no longer
1615 // a valid canonical loop.
1616
1617 if (!Chunk)
1618 Chunk = One;
1619
1620 Value *ThreadNum = getOrCreateThreadID(SrcLoc);
1621
1622 Constant *SchedulingType =
1623 ConstantInt::get(I32Type, static_cast<int>(SchedType));
1624
1625 // Call the "init" function.
1626 Builder.CreateCall(DynamicInit,
1627 {SrcLoc, ThreadNum, SchedulingType, /* LowerBound */ One,
1628 UpperBound, /* step */ One, Chunk});
1629
1630 // An outer loop around the existing one.
1631 BasicBlock *OuterCond = BasicBlock::Create(
1632 PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
1633 PreHeader->getParent());
1634 // The comparison below needs a 32-bit zero, so the IVTy constants above cannot be reused.
1635 Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
1636 Value *Res =
1637 Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
1638 PLowerBound, PUpperBound, PStride});
1639 Constant *Zero32 = ConstantInt::get(I32Type, 0);
1640 Value *MoreWork = Builder.CreateCmp(CmpInst::ICMP_NE, Res, Zero32);
1641 Value *LowerBound =
1642 Builder.CreateSub(Builder.CreateLoad(IVTy, PLowerBound), One, "lb");
1643 Builder.CreateCondBr(MoreWork, Header, Exit);
1644
1645 // Change PHI-node in loop header to use outer cond rather than preheader,
1646 // and set IV to the LowerBound.
1647 Instruction *Phi = &Header->front();
1648 auto *PI = cast<PHINode>(Phi);
1649 PI->setIncomingBlock(0, OuterCond);
1650 PI->setIncomingValue(0, LowerBound);
1651
1652 // Then set the pre-header to jump to the OuterCond
1653 Instruction *Term = PreHeader->getTerminator();
1654 auto *Br = cast<BranchInst>(Term);
1655 Br->setSuccessor(0, OuterCond);
1656
1657 // Modify the inner condition:
1658 // * Use the UpperBound returned from the DynamicNext call.
1659 // * Jump to the outer loop when done with one of the inner loops.
1660 Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
1661 UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
1662 Instruction *Comp = &*Builder.GetInsertPoint();
1663 auto *CI = cast<CmpInst>(Comp);
1664 CI->setOperand(1, UpperBound);
1665 // Redirect the inner exit to branch to outer condition.
1666 Instruction *Branch = &Cond->back();
1667 auto *BI = cast<BranchInst>(Branch);
1668 assert(BI->getSuccessor(1) == Exit);
1669 BI->setSuccessor(1, OuterCond);
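// [Editorial sketch, not part of the source] The transformed control flow is
// roughly:
//   preheader:  __kmpc_dispatch_init(...); br outer.cond
//   outer.cond: more = __kmpc_dispatch_next(..., &lastiter, &lb, &ub, &stride);
//               br more, header, exit
//   header..inc: the original loop, its PHI now seeded with lb - 1 and its
//               comparison bounded by the loaded ub
//   cond (done): br outer.cond      ; fetch the next chunk instead of exiting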
1670
1671 // Add the barrier if requested.
1672 if (NeedsBarrier) {
1673 Builder.SetInsertPoint(&Exit->back());
1674 createBarrier(LocationDescription(Builder.saveIP(), DL),
1675 omp::Directive::OMPD_for, /* ForceSimpleCall */ false,
1676 /* CheckCancelFlag */ false);
1677 }
1678
1679 CLI->invalidate();
1680 return AfterIP;
1681}
1682
1683/// Make \p Source branch to \p Target.
1684///
1685/// Handles two situations:
1686/// * \p Source already has an unconditional branch.
1687/// * \p Source is a degenerate block (no terminator because the BB is
1688/// the current head of the IR construction).
1689static void redirectTo(BasicBlock *Source, BasicBlock *Target, DebugLoc DL) {
1690 if (Instruction *Term = Source->getTerminator()) {
1691 auto *Br = cast<BranchInst>(Term);
1692 assert(!Br->isConditional() &&
1693        "BB's terminator must be an unconditional branch (or degenerate)");
1694 BasicBlock *Succ = Br->getSuccessor(0);
1695 Succ->removePredecessor(Source, /*KeepOneInputPHIs=*/true);
1696 Br->setSuccessor(0, Target);
1697 return;
1698 }
1699
1700 auto *NewBr = BranchInst::Create(Target, Source);
1701 NewBr->setDebugLoc(DL);
1702}
1703
1704/// Redirect all edges that branch to \p OldTarget to \p NewTarget. That is,
1705/// after this \p OldTarget will be orphaned.
1706static void redirectAllPredecessorsTo(BasicBlock *OldTarget,
1707 BasicBlock *NewTarget, DebugLoc DL) {
1708 for (BasicBlock *Pred : make_early_inc_range(predecessors(OldTarget)))
1709 redirectTo(Pred, NewTarget, DL);
1710}
1711
1712/// Determine which blocks in \p BBs are reachable from outside and remove the
1713/// ones that are not reachable from the function.
1714static void removeUnusedBlocksFromParent(ArrayRef<BasicBlock *> BBs) {
1715 SmallPtrSet<BasicBlock *, 6> BBsToErase{BBs.begin(), BBs.end()};
1716 auto HasRemainingUses = [&BBsToErase](BasicBlock *BB) {
1717 for (Use &U : BB->uses()) {
1718 auto *UseInst = dyn_cast<Instruction>(U.getUser());
1719 if (!UseInst)
1720 continue;
1721 if (BBsToErase.count(UseInst->getParent()))
1722 continue;
1723 return true;
1724 }
1725 return false;
1726 };
1727
1728 while (true) {
1729 bool Changed = false;
1730 for (BasicBlock *BB : make_early_inc_range(BBsToErase)) {
1731 if (HasRemainingUses(BB)) {
1732 BBsToErase.erase(BB);
1733 Changed = true;
1734 }
1735 }
1736 if (!Changed)
1737 break;
1738 }
1739
1740 SmallVector<BasicBlock *, 7> BBVec(BBsToErase.begin(), BBsToErase.end());
1741 DeleteDeadBlocks(BBVec);
1742}
1743
1744CanonicalLoopInfo *
1745OpenMPIRBuilder::collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1746 InsertPointTy ComputeIP) {
1747 assert(Loops.size() >= 1 && "At least one loop required");
1748 size_t NumLoops = Loops.size();
1749
1750 // Nothing to do if there is already just one loop.
1751 if (NumLoops == 1)
1752 return Loops.front();
1753
1754 CanonicalLoopInfo *Outermost = Loops.front();
1755 CanonicalLoopInfo *Innermost = Loops.back();
1756 BasicBlock *OrigPreheader = Outermost->getPreheader();
1757 BasicBlock *OrigAfter = Outermost->getAfter();
1758 Function *F = OrigPreheader->getParent();
1759
1760 // Setup the IRBuilder for inserting the trip count computation.
1761 Builder.SetCurrentDebugLocation(DL);
1762 if (ComputeIP.isSet())
1763 Builder.restoreIP(ComputeIP);
1764 else
1765 Builder.restoreIP(Outermost->getPreheaderIP());
1766
1767 // Derive the collapsed loop's trip count.
1768 // TODO: Find common/largest indvar type.
1769 Value *CollapsedTripCount = nullptr;
1770 for (CanonicalLoopInfo *L : Loops) {
1771 assert(L->isValid() &&
1772        "All loops to collapse must be valid canonical loops");
1773 Value *OrigTripCount = L->getTripCount();
1774 if (!CollapsedTripCount) {
1775 CollapsedTripCount = OrigTripCount;
1776 continue;
1777 }
1778
1779 // TODO: Enable UndefinedSanitizer to diagnose an overflow here.
1780 CollapsedTripCount = Builder.CreateMul(CollapsedTripCount, OrigTripCount,
1781 {}, /*HasNUW=*/true);
1782 }
1783
1784 // Create the collapsed loop control flow.
1785 CanonicalLoopInfo *Result =
1786 createLoopSkeleton(DL, CollapsedTripCount, F,
1787 OrigPreheader->getNextNode(), OrigAfter, "collapsed");
1788
1789 // Build the collapsed loop body code.
1790 // Start with deriving the input loop induction variables from the collapsed
1791 // one, using a divmod scheme. To preserve the original loops' order, the
1792 // innermost loop uses the least significant bits.
1793 Builder.restoreIP(Result->getBodyIP());
1794
1795 Value *Leftover = Result->getIndVar();
1796 SmallVector<Value *> NewIndVars;
1797 NewIndVars.set_size(NumLoops);
1798 for (int i = NumLoops - 1; i >= 1; --i) {
1799 Value *OrigTripCount = Loops[i]->getTripCount();
1800
1801 Value *NewIndVar = Builder.CreateURem(Leftover, OrigTripCount);
1802 NewIndVars[i] = NewIndVar;
1803
1804 Leftover = Builder.CreateUDiv(Leftover, OrigTripCount);
1805 }
1806 // Outermost loop gets all the remaining bits.
1807 NewIndVars[0] = Leftover;
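// [Editorial worked example, not from the original source] For three loops
// with trip counts (N0, N1, N2) and collapsed induction variable c, the
// divmod scheme above yields
//   i2 = c % N2,  i1 = (c / N2) % N1,  i0 = (c / N2) / N1.
// E.g. N0=2, N1=3, N2=4 and c=17 gives (i0, i1, i2) = (1, 1, 1), since
// 17 == 1*(3*4) + 1*4 + 1, so the innermost loop varies fastest.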
1808
1809 // Construct the loop body control flow.
1810 // We progressively construct the branch structure following in direction of
1811 // the control flow, from the leading in-between code, the loop nest body, the
1812 // trailing in-between code, and rejoining the collapsed loop's latch.
1813 // ContinueBlock and ContinuePred keep track of the source(s) of the next edge. If
1814 // the ContinueBlock is set, continue with that block. If ContinuePred, use
1815 // its predecessors as sources.
1816 BasicBlock *ContinueBlock = Result->getBody();
1817 BasicBlock *ContinuePred = nullptr;
1818 auto ContinueWith = [&ContinueBlock, &ContinuePred, DL](BasicBlock *Dest,
1819 BasicBlock *NextSrc) {
1820 if (ContinueBlock)
1821 redirectTo(ContinueBlock, Dest, DL);
1822 else
1823 redirectAllPredecessorsTo(ContinuePred, Dest, DL);
1824
1825 ContinueBlock = nullptr;
1826 ContinuePred = NextSrc;
1827 };
1828
1829 // The code before the nested loop of each level.
1830 // Because we are sinking it into the nest, it will be executed more often
1831 // than in the original loop. More sophisticated schemes could keep track of what
1832 // the in-between code is and instantiate it only once per thread.
1833 for (size_t i = 0; i < NumLoops - 1; ++i)
1834 ContinueWith(Loops[i]->getBody(), Loops[i + 1]->getHeader());
1835
1836 // Connect the loop nest body.
1837 ContinueWith(Innermost->getBody(), Innermost->getLatch());
1838
1839 // The code after the nested loop at each level.
1840 for (size_t i = NumLoops - 1; i > 0; --i)
1841 ContinueWith(Loops[i]->getAfter(), Loops[i - 1]->getLatch());
1842
1843 // Connect the finished loop to the collapsed loop latch.
1844 ContinueWith(Result->getLatch(), nullptr);
1845
1846 // Replace the input loops with the new collapsed loop.
1847 redirectTo(Outermost->getPreheader(), Result->getPreheader(), DL);
1848 redirectTo(Result->getAfter(), Outermost->getAfter(), DL);
1849
1850 // Replace the input loop indvars with the derived ones.
1851 for (size_t i = 0; i < NumLoops; ++i)
1852 Loops[i]->getIndVar()->replaceAllUsesWith(NewIndVars[i]);
1853
1854 // Remove unused parts of the input loops.
1855 SmallVector<BasicBlock *, 12> OldControlBBs;
1856 OldControlBBs.reserve(6 * Loops.size());
1857 for (CanonicalLoopInfo *Loop : Loops)
1858 Loop->collectControlBlocks(OldControlBBs);
1859 removeUnusedBlocksFromParent(OldControlBBs);
1860
1861 for (CanonicalLoopInfo *L : Loops)
1862 L->invalidate();
1863
1864#ifndef NDEBUG
1865 Result->assertOK();
1866#endif
1867 return Result;
1868}
1869
1870std::vector<CanonicalLoopInfo *>
1871OpenMPIRBuilder::tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
1872 ArrayRef<Value *> TileSizes) {
1873 assert(TileSizes.size() == Loops.size() &&
1874        "Must pass as many tile sizes as there are loops");
1875 int NumLoops = Loops.size();
1876 assert(NumLoops >= 1 && "At least one loop to tile required");
1877
1878 CanonicalLoopInfo *OutermostLoop = Loops.front();
1879 CanonicalLoopInfo *InnermostLoop = Loops.back();
1880 Function *F = OutermostLoop->getBody()->getParent();
1881 BasicBlock *InnerEnter = InnermostLoop->getBody();
1882 BasicBlock *InnerLatch = InnermostLoop->getLatch();
1883
1884 // Collect original trip counts and induction variables to be accessible by
1885 // index. Also, the structure of the original loops is not preserved during
1886 // the construction of the tiled loops, so do it before we scavenge the BBs of
1887 // any original CanonicalLoopInfo.
1888 SmallVector<Value *, 4> OrigTripCounts, OrigIndVars;
1889 for (CanonicalLoopInfo *L : Loops) {
1890 assert(L->isValid() && "All input loops must be valid canonical loops");
1891 OrigTripCounts.push_back(L->getTripCount());
1892 OrigIndVars.push_back(L->getIndVar());
1893 }
1894
1895 // Collect the code between loop headers. These may contain SSA definitions
1896 // that are used in the loop nest body. To be usable within the innermost
1897 // body, these BasicBlocks will be sunk into the loop nest body. That is,
1898 // these instructions may be executed more often than before the tiling.
1899 // TODO: It would be sufficient to only sink them into body of the
1900 // corresponding tile loop.
1901 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 4> InbetweenCode;
1902 for (int i = 0; i < NumLoops - 1; ++i) {
1903 CanonicalLoopInfo *Surrounding = Loops[i];
1904 CanonicalLoopInfo *Nested = Loops[i + 1];
1905
1906 BasicBlock *EnterBB = Surrounding->getBody();
1907 BasicBlock *ExitBB = Nested->getHeader();
1908 InbetweenCode.emplace_back(EnterBB, ExitBB);
1909 }
1910
1911 // Compute the trip counts of the floor loops.
1912 Builder.SetCurrentDebugLocation(DL);
1913 Builder.restoreIP(OutermostLoop->getPreheaderIP());
1914 SmallVector<Value *, 4> FloorCount, FloorRems;
1915 for (int i = 0; i < NumLoops; ++i) {
1916 Value *TileSize = TileSizes[i];
1917 Value *OrigTripCount = OrigTripCounts[i];
1918 Type *IVType = OrigTripCount->getType();
1919
1920 Value *FloorTripCount = Builder.CreateUDiv(OrigTripCount, TileSize);
1921 Value *FloorTripRem = Builder.CreateURem(OrigTripCount, TileSize);
1922
1923 // 0 if the tilesize divides the tripcount evenly, 1 otherwise.
1924 // 1 means we need an additional iteration for a partial tile.
1925 //
1926 // Unfortunately we cannot just use the roundup-formula
1927 // (tripcount + tilesize - 1)/tilesize
1928 // because the summation might overflow. We do not want to introduce undefined
1929 // behavior when the untiled loop nest did not.
1930 Value *FloorTripOverflow =
1931 Builder.CreateICmpNE(FloorTripRem, ConstantInt::get(IVType, 0));
1932
1933 FloorTripOverflow = Builder.CreateZExt(FloorTripOverflow, IVType);
1934 FloorTripCount =
1935 Builder.CreateAdd(FloorTripCount, FloorTripOverflow,
1936 "omp_floor" + Twine(i) + ".tripcount", true);
1937
1938 // Remember some values for later use.
1939 FloorCount.push_back(FloorTripCount);
1940 FloorRems.push_back(FloorTripRem);
1941 }
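// [Editorial worked example, not from the original source] With an original
// trip count of 10 and a tile size of 4: FloorTripCount = 10 udiv 4 = 2,
// FloorTripRem = 2, FloorTripOverflow = 1, so the floor loop runs 2 + 1 = 3
// times (two full tiles plus a partial tile of 2 iterations) without ever
// evaluating 10 + 4 - 1, which could wrap for trip counts near the type's
// maximum.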
1942
1943 // Generate the new loop nest, from the outermost to the innermost.
1944 std::vector<CanonicalLoopInfo *> Result;
1945 Result.reserve(NumLoops * 2);
1946
1947 // The basic block of the surrounding loop that enters the generated loop
1948 // nest.
1949 BasicBlock *Enter = OutermostLoop->getPreheader();
1950
1951 // The basic block of the surrounding loop where the inner code should
1952 // continue.
1953 BasicBlock *Continue = OutermostLoop->getAfter();
1954
1955 // Where the next loop basic block should be inserted.
1956 BasicBlock *OutroInsertBefore = InnermostLoop->getExit();
1957
1958 auto EmbeddNewLoop =
1959 [this, DL, F, InnerEnter, &Enter, &Continue, &OutroInsertBefore](
1960 Value *TripCount, const Twine &Name) -> CanonicalLoopInfo * {
1961 CanonicalLoopInfo *EmbeddedLoop = createLoopSkeleton(
1962 DL, TripCount, F, InnerEnter, OutroInsertBefore, Name);
1963 redirectTo(Enter, EmbeddedLoop->getPreheader(), DL);
1964 redirectTo(EmbeddedLoop->getAfter(), Continue, DL);
1965
1966 // Setup the position where the next embedded loop connects to this loop.
1967 Enter = EmbeddedLoop->getBody();
1968 Continue = EmbeddedLoop->getLatch();
1969 OutroInsertBefore = EmbeddedLoop->getLatch();
1970 return EmbeddedLoop;
1971 };
1972
1973 auto EmbeddNewLoops = [&Result, &EmbeddNewLoop](ArrayRef<Value *> TripCounts,
1974 const Twine &NameBase) {
1975 for (auto P : enumerate(TripCounts)) {
1976 CanonicalLoopInfo *EmbeddedLoop =
1977 EmbeddNewLoop(P.value(), NameBase + Twine(P.index()));
1978 Result.push_back(EmbeddedLoop);
1979 }
1980 };
1981
1982 EmbeddNewLoops(FloorCount, "floor");
1983
1984 // Within the innermost floor loop, emit the code that computes the tile
1985 // sizes.
1986 Builder.SetInsertPoint(Enter->getTerminator());
1987 SmallVector<Value *, 4> TileCounts;
1988 for (int i = 0; i < NumLoops; ++i) {
1989 CanonicalLoopInfo *FloorLoop = Result[i];
1990 Value *TileSize = TileSizes[i];
1991
1992 Value *FloorIsEpilogue =
1993 Builder.CreateICmpEQ(FloorLoop->getIndVar(), FloorCount[i]);
1994 Value *TileTripCount =
1995 Builder.CreateSelect(FloorIsEpilogue, FloorRems[i], TileSize);
1996
1997 TileCounts.push_back(TileTripCount);
1998 }
1999
2000 // Create the tile loops.
2001 EmbeddNewLoops(TileCounts, "tile");
2002
2003 // Insert the inbetween code into the body.
2004 BasicBlock *BodyEnter = Enter;
2005 BasicBlock *BodyEntered = nullptr;
2006 for (std::pair<BasicBlock *, BasicBlock *> P : InbetweenCode) {
2007 BasicBlock *EnterBB = P.first;
2008 BasicBlock *ExitBB = P.second;
2009
2010 if (BodyEnter)
2011 redirectTo(BodyEnter, EnterBB, DL);
2012 else
2013 redirectAllPredecessorsTo(BodyEntered, EnterBB, DL);
2014
2015 BodyEnter = nullptr;
2016 BodyEntered = ExitBB;
2017 }
2018
2019 // Append the original loop nest body into the generated loop nest body.
2020 if (BodyEnter)
2021 redirectTo(BodyEnter, InnerEnter, DL);
2022 else
2023 redirectAllPredecessorsTo(BodyEntered, InnerEnter, DL);
2024 redirectAllPredecessorsTo(InnerLatch, Continue, DL);
2025
2026 // Replace the original induction variable with an induction variable computed
2027 // from the tile and floor induction variables.
2028 Builder.restoreIP(Result.back()->getBodyIP());
2029 for (int i = 0; i < NumLoops; ++i) {
2030 CanonicalLoopInfo *FloorLoop = Result[i];
2031 CanonicalLoopInfo *TileLoop = Result[NumLoops + i];
2032 Value *OrigIndVar = OrigIndVars[i];
2033 Value *Size = TileSizes[i];
2034
2035 Value *Scale =
2036 Builder.CreateMul(Size, FloorLoop->getIndVar(), {}, /*HasNUW=*/true);
2037 Value *Shift =
2038 Builder.CreateAdd(Scale, TileLoop->getIndVar(), {}, /*HasNUW=*/true);
2039 OrigIndVar->replaceAllUsesWith(Shift);
2040 }
2041
2042 // Remove unused parts of the original loops.
2043 SmallVector<BasicBlock *, 12> OldControlBBs;
2044 OldControlBBs.reserve(6 * Loops.size());
2045 for (CanonicalLoopInfo *Loop : Loops)
2046 Loop->collectControlBlocks(OldControlBBs);
2047 removeUnusedBlocksFromParent(OldControlBBs);
2048
2049 for (CanonicalLoopInfo *L : Loops)
2050 L->invalidate();
2051
2052#ifndef NDEBUG
2053 for (CanonicalLoopInfo *GenL : Result)
2054 GenL->assertOK();
2055#endif
2056 return Result;
2057}
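// [Editorial note] As used in the epilogue handling above, the returned vector
// holds the floor loops first and the tile loops second: Result[i] is the
// i-th "floor" loop and Result[NumLoops + i] is the matching "tile" loop.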
2058
2059OpenMPIRBuilder::InsertPointTy
2060OpenMPIRBuilder::createCopyPrivate(const LocationDescription &Loc,
2061 llvm::Value *BufSize, llvm::Value *CpyBuf,
2062 llvm::Value *CpyFn, llvm::Value *DidIt) {
2063 if (!updateToLocation(Loc))
2064 return Loc.IP;
2065
2066 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2067 Value *Ident = getOrCreateIdent(SrcLocStr);
2068 Value *ThreadId = getOrCreateThreadID(Ident);
2069
2070 llvm::Value *DidItLD = Builder.CreateLoad(Builder.getInt32Ty(), DidIt);
2071
2072 Value *Args[] = {Ident, ThreadId, BufSize, CpyBuf, CpyFn, DidItLD};
2073
2074 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_copyprivate);
2075 Builder.CreateCall(Fn, Args);
2076
2077 return Builder.saveIP();
2078}
2079
2080OpenMPIRBuilder::InsertPointTy
2081OpenMPIRBuilder::createSingle(const LocationDescription &Loc,
2082 BodyGenCallbackTy BodyGenCB,
2083 FinalizeCallbackTy FiniCB, llvm::Value *DidIt) {
2084
2085 if (!updateToLocation(Loc))
2086 return Loc.IP;
2087
2088 // If needed (i.e. not null), initialize `DidIt` with 0
2089 if (DidIt) {
2090 Builder.CreateStore(Builder.getInt32(0), DidIt);
2091 }
2092
2093 Directive OMPD = Directive::OMPD_single;
2094 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2095 Value *Ident = getOrCreateIdent(SrcLocStr);
2096 Value *ThreadId = getOrCreateThreadID(Ident);
2097 Value *Args[] = {Ident, ThreadId};
2098
2099 Function *EntryRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_single);
2100 Instruction *EntryCall = Builder.CreateCall(EntryRTLFn, Args);
2101
2102 Function *ExitRTLFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_single);
2103 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2104
2105 // generates the following:
2106 // if (__kmpc_single()) {
2107 // .... single region ...
2108 // __kmpc_end_single
2109 // }
2110
2111 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2112 /*Conditional*/ true, /*hasFinalize*/ true);
2113}
2114
2115OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCritical(
2116 const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB,
2117 FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst) {
2118
2119 if (!updateToLocation(Loc))
2120 return Loc.IP;
2121
2122 Directive OMPD = Directive::OMPD_critical;
2123 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2124 Value *Ident = getOrCreateIdent(SrcLocStr);
2125 Value *ThreadId = getOrCreateThreadID(Ident);
2126 Value *LockVar = getOMPCriticalRegionLock(CriticalName);
2127 Value *Args[] = {Ident, ThreadId, LockVar};
2128
2129 SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), std::end(Args));
2130 Function *RTFn = nullptr;
2131 if (HintInst) {
2132 // Add Hint to entry Args and create call
2133 EnterArgs.push_back(HintInst);
2134 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical_with_hint);
2135 } else {
2136 RTFn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_critical);
2137 }
2138 Instruction *EntryCall = Builder.CreateCall(RTFn, EnterArgs);
2139
2140 Function *ExitRTLFn =
2141 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_end_critical);
2142 Instruction *ExitCall = Builder.CreateCall(ExitRTLFn, Args);
2143
2144 return EmitOMPInlinedRegion(OMPD, EntryCall, ExitCall, BodyGenCB, FiniCB,
2145 /*Conditional*/ false, /*hasFinalize*/ true);
2146}
2147
2148OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::EmitOMPInlinedRegion(
2149 Directive OMPD, Instruction *EntryCall, Instruction *ExitCall,
2150 BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional,
2151 bool HasFinalize, bool IsCancellable) {
2152
2153 if (HasFinalize)
2154 FinalizationStack.push_back({FiniCB, OMPD, IsCancellable});
2155
2156 // Create inlined region's entry and body blocks, in preparation
2157 // for conditional creation
2158 BasicBlock *EntryBB = Builder.GetInsertBlock();
2159 Instruction *SplitPos = EntryBB->getTerminator();
2160 if (!isa_and_nonnull<BranchInst>(SplitPos))
2161 SplitPos = new UnreachableInst(Builder.getContext(), EntryBB);
2162 BasicBlock *ExitBB = EntryBB->splitBasicBlock(SplitPos, "omp_region.end");
2163 BasicBlock *FiniBB =
2164 EntryBB->splitBasicBlock(EntryBB->getTerminator(), "omp_region.finalize");
2165
2166 Builder.SetInsertPoint(EntryBB->getTerminator());
2167 emitCommonDirectiveEntry(OMPD, EntryCall, ExitBB, Conditional);
2168
2169 // generate body
2170 BodyGenCB(/* AllocaIP */ InsertPointTy(),
2171 /* CodeGenIP */ Builder.saveIP(), *FiniBB);
2172
2173 // If we didn't emit a branch to FiniBB during body generation, it means
2174 // FiniBB is unreachable (e.g. while(1);). Stop generating the unreachable
2175 // blocks and remove anything we are not going to use.
2176 auto SkipEmittingRegion = FiniBB->hasNPredecessors(0);
2177 if (SkipEmittingRegion) {
2178 FiniBB->eraseFromParent();
2179 ExitCall->eraseFromParent();
2180 // Discard finalization if we have it.
2181 if (HasFinalize) {
2182 assert(!FinalizationStack.empty() &&
2183 "Unexpected finalization stack state!");
2184 FinalizationStack.pop_back();
2185 }
2186 } else {
2187 // emit exit call and do any needed finalization.
2188 auto FinIP = InsertPointTy(FiniBB, FiniBB->getFirstInsertionPt());
2189 assert(FiniBB->getTerminator()->getNumSuccessors() == 1 &&
2190 FiniBB->getTerminator()->getSuccessor(0) == ExitBB &&
2191 "Unexpected control flow graph state!!");
2192 emitCommonDirectiveExit(OMPD, FinIP, ExitCall, HasFinalize);
2193 assert(FiniBB->getUniquePredecessor()->getUniqueSuccessor() == FiniBB &&
2194 "Unexpected Control Flow State!");
2195 MergeBlockIntoPredecessor(FiniBB);
2196 }
2197
2198 // If we are skipping the region of a non-conditional directive, remove the
2199 // exit block and clear the builder's insertion point.
2200 assert(SplitPos->getParent() == ExitBB &&
2201 "Unexpected Insertion point location!");
2202 if (!Conditional && SkipEmittingRegion) {
2203 ExitBB->eraseFromParent();
2204 Builder.ClearInsertionPoint();
2205 } else {
2206 auto merged = MergeBlockIntoPredecessor(ExitBB);
2207 BasicBlock *ExitPredBB = SplitPos->getParent();
2208 auto InsertBB = merged ? ExitPredBB : ExitBB;
2209 if (!isa_and_nonnull<BranchInst>(SplitPos))
2210 SplitPos->eraseFromParent();
2211 Builder.SetInsertPoint(InsertBB);
2212 }
2213
2214 return Builder.saveIP();
2215}
2216
2217OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveEntry(
2218 Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional) {
2219 // If there is nothing to do, return the current insertion point.
2220 if (!Conditional || !EntryCall)
2221 return Builder.saveIP();
2222
2223 BasicBlock *EntryBB = Builder.GetInsertBlock();
2224 Value *CallBool = Builder.CreateIsNotNull(EntryCall);
2225 auto *ThenBB = BasicBlock::Create(M.getContext(), "omp_region.body");
2226 auto *UI = new UnreachableInst(Builder.getContext(), ThenBB);
2227
2228 // Emit thenBB and set the Builder's insertion point there for
2229 // body generation next. Place the block after the current block.
2230 Function *CurFn = EntryBB->getParent();
2231 CurFn->getBasicBlockList().insertAfter(EntryBB->getIterator(), ThenBB);
2232
2233 // Move Entry branch to end of ThenBB, and replace with conditional
2234 // branch (If-stmt)
2235 Instruction *EntryBBTI = EntryBB->getTerminator();
2236 Builder.CreateCondBr(CallBool, ThenBB, ExitBB);
2237 EntryBBTI->removeFromParent();
2238 Builder.SetInsertPoint(UI);
2239 Builder.Insert(EntryBBTI);
2240 UI->eraseFromParent();
2241 Builder.SetInsertPoint(ThenBB->getTerminator());
2242
2243 // return an insertion point to ExitBB.
2244 return IRBuilder<>::InsertPoint(ExitBB, ExitBB->getFirstInsertionPt());
2245}
2246
2247OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::emitCommonDirectiveExit(
2248 omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall,
2249 bool HasFinalize) {
2250
2251 Builder.restoreIP(FinIP);
2252
2253 // If there is finalization to do, emit it before the exit call
2254 if (HasFinalize) {
2255 assert(!FinalizationStack.empty() &&
2256 "Unexpected finalization stack state!");
2257
2258 FinalizationInfo Fi = FinalizationStack.pop_back_val();
2259 assert(Fi.DK == OMPD && "Unexpected Directive for Finalization call!");
2260
2261 Fi.FiniCB(FinIP);
2262
2263 BasicBlock *FiniBB = FinIP.getBlock();
2264 Instruction *FiniBBTI = FiniBB->getTerminator();
2265
2266 // set Builder IP for call creation
2267 Builder.SetInsertPoint(FiniBBTI);
2268 }
2269
2270 if (!ExitCall)
2271 return Builder.saveIP();
2272
2273 // Place the exit call as the last instruction before the finalization block's terminator.
2274 ExitCall->removeFromParent();
2275 Builder.Insert(ExitCall);
2276
2277 return IRBuilder<>::InsertPoint(ExitCall->getParent(),
2278 ExitCall->getIterator());
2279}
2280
2281OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createCopyinClauseBlocks(
2282 InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr,
2283 llvm::IntegerType *IntPtrTy, bool BranchtoEnd) {
2284 if (!IP.isSet())
2285 return IP;
2286
2287 IRBuilder<>::InsertPointGuard IPG(Builder);
2288
2289 // creates the following CFG structure
2290 // OMP_Entry : (MasterAddr != PrivateAddr)?
2291 // F T
2292 // | \
2293 // | copyin.not.master
2294 // | /
2295 // v /
2296 // copyin.not.master.end
2297 // |
2298 // v
2299 // OMP.Entry.Next
2300
2301 BasicBlock *OMP_Entry = IP.getBlock();
2302 Function *CurFn = OMP_Entry->getParent();
2303 BasicBlock *CopyBegin =
2304 BasicBlock::Create(M.getContext(), "copyin.not.master", CurFn);
2305 BasicBlock *CopyEnd = nullptr;
2306
2307 // If the entry block is terminated, split it to preserve the branch to the
2308 // following basic block (i.e. OMP.Entry.Next); otherwise, leave everything as is.
2309 if (isa_and_nonnull<BranchInst>(OMP_Entry->getTerminator())) {
2310 CopyEnd = OMP_Entry->splitBasicBlock(OMP_Entry->getTerminator(),
2311 "copyin.not.master.end");
2312 OMP_Entry->getTerminator()->eraseFromParent();
2313 } else {
2314 CopyEnd =
2315 BasicBlock::Create(M.getContext(), "copyin.not.master.end", CurFn);
2316 }
2317
2318 Builder.SetInsertPoint(OMP_Entry);
2319 Value *MasterPtr = Builder.CreatePtrToInt(MasterAddr, IntPtrTy);
2320 Value *PrivatePtr = Builder.CreatePtrToInt(PrivateAddr, IntPtrTy);
2321 Value *cmp = Builder.CreateICmpNE(MasterPtr, PrivatePtr);
2322 Builder.CreateCondBr(cmp, CopyBegin, CopyEnd);
2323
2324 Builder.SetInsertPoint(CopyBegin);
2325 if (BranchtoEnd)
2326 Builder.SetInsertPoint(Builder.CreateBr(CopyEnd));
2327
2328 return Builder.saveIP();
2329}
2330
2331CallInst *OpenMPIRBuilder::createOMPAlloc(const LocationDescription &Loc,
2332 Value *Size, Value *Allocator,
2333 std::string Name) {
2334 IRBuilder<>::InsertPointGuard IPG(Builder);
2335 Builder.restoreIP(Loc.IP);
2336
2337 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2338 Value *Ident = getOrCreateIdent(SrcLocStr);
2339 Value *ThreadId = getOrCreateThreadID(Ident);
2340 Value *Args[] = {ThreadId, Size, Allocator};
2341
2342 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_alloc);
2343
2344 return Builder.CreateCall(Fn, Args, Name);
2345}
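A hedged usage sketch for the allocation helpers; OMPBuilder, Loc, Size, and Allocator are assumed to be in scope and are illustrative only:

// Illustrative only: memory obtained via createOMPAlloc (__kmpc_alloc) is
// expected to be released with a matching createOMPFree (__kmpc_free) call
// that uses the same allocator.
CallInst *Ptr = OMPBuilder.createOMPAlloc(Loc, Size, Allocator, "omp.alloc");
// ... code that uses Ptr ...
OMPBuilder.createOMPFree(Loc, Ptr, Allocator, "");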
2346
2347CallInst *OpenMPIRBuilder::createOMPFree(const LocationDescription &Loc,
2348 Value *Addr, Value *Allocator,
2349 std::string Name) {
2350 IRBuilder<>::InsertPointGuard IPG(Builder);
2351 Builder.restoreIP(Loc.IP);
2352
2353 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2354 Value *Ident = getOrCreateIdent(SrcLocStr);
2355 Value *ThreadId = getOrCreateThreadID(Ident);
2356 Value *Args[] = {ThreadId, Addr, Allocator};
2357 Function *Fn = getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_free);
2358 return Builder.CreateCall(Fn, Args, Name);
2359}
2360
2361CallInst *OpenMPIRBuilder::createCachedThreadPrivate(
2362 const LocationDescription &Loc, llvm::Value *Pointer,
2363 llvm::ConstantInt *Size, const llvm::Twine &Name) {
2364 IRBuilder<>::InsertPointGuard IPG(Builder);
2365 Builder.restoreIP(Loc.IP);
2366
2367 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2368 Value *Ident = getOrCreateIdent(SrcLocStr);
2369 Value *ThreadId = getOrCreateThreadID(Ident);
2370 Constant *ThreadPrivateCache =
2371 getOrCreateOMPInternalVariable(Int8PtrPtr, Name);
2372 llvm::Value *Args[] = {Ident, ThreadId, Pointer, Size, ThreadPrivateCache};
2373
2374 Function *Fn =
2375 getOrCreateRuntimeFunctionPtr(OMPRTL___kmpc_threadprivate_cached);
2376
2377 return Builder.CreateCall(Fn, Args);
2378}
2379
2380OpenMPIRBuilder::InsertPointTy
2381OpenMPIRBuilder::createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime) {
2382 if (!updateToLocation(Loc))
2383 return Loc.IP;
2384
2385 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2386 Value *Ident = getOrCreateIdent(SrcLocStr);
2387 ConstantInt *IsSPMDVal = ConstantInt::getBool(Int32->getContext(), IsSPMD);
2388 ConstantInt *UseGenericStateMachine =
2389 ConstantInt::getBool(Int32->getContext(), !IsSPMD);
2390 ConstantInt *RequiresFullRuntimeVal = ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
2391
2392 Function *Fn = getOrCreateRuntimeFunctionPtr(
2393 omp::RuntimeFunction::OMPRTL___kmpc_target_init);
2394
2395 CallInst *ThreadKind =
2396 Builder.CreateCall(Fn, {Ident, IsSPMDVal, UseGenericStateMachine, RequiresFullRuntimeVal});
2397
2398 Value *ExecUserCode = Builder.CreateICmpEQ(
2399 ThreadKind, ConstantInt::get(ThreadKind->getType(), -1), "exec_user_code");
2400
2401 // ThreadKind = __kmpc_target_init(...)
2402 // if (ThreadKind == -1)
2403 // user_code
2404 // else
2405 // return;
2406
2407 auto *UI = Builder.CreateUnreachable();
2408 BasicBlock *CheckBB = UI->getParent();
2409 BasicBlock *UserCodeEntryBB = CheckBB->splitBasicBlock(UI, "user_code.entry");
2410
2411 BasicBlock *WorkerExitBB = BasicBlock::Create(
2412 CheckBB->getContext(), "worker.exit", CheckBB->getParent());
2413 Builder.SetInsertPoint(WorkerExitBB);
2414 Builder.CreateRetVoid();
2415
2416 auto *CheckBBTI = CheckBB->getTerminator();
2417 Builder.SetInsertPoint(CheckBBTI);
2418 Builder.CreateCondBr(ExecUserCode, UI->getParent(), WorkerExitBB);
2419
2420 CheckBBTI->eraseFromParent();
2421 UI->eraseFromParent();
2422
2423 // Continue in the "user_code" block, see diagram above and in
2424 // openmp/libomptarget/deviceRTLs/common/include/target.h .
2425 return InsertPointTy(UserCodeEntryBB, UserCodeEntryBB->getFirstInsertionPt());
2426}
2427
2428void OpenMPIRBuilder::createTargetDeinit(const LocationDescription &Loc,
2429 bool IsSPMD, bool RequiresFullRuntime) {
2430 if (!updateToLocation(Loc))
2431 return;
2432
2433 Constant *SrcLocStr = getOrCreateSrcLocStr(Loc);
2434 Value *Ident = getOrCreateIdent(SrcLocStr);
2435 ConstantInt *IsSPMDVal = ConstantInt::getBool(Int32->getContext(), IsSPMD);
2436 ConstantInt *RequiresFullRuntimeVal = ConstantInt::getBool(Int32->getContext(), RequiresFullRuntime);
2437
2438 Function *Fn = getOrCreateRuntimeFunctionPtr(
2439 omp::RuntimeFunction::OMPRTL___kmpc_target_deinit);
2440
2441 Builder.CreateCall(Fn, {Ident, IsSPMDVal, RequiresFullRuntimeVal});
2442}
2443
2444std::string OpenMPIRBuilder::getNameWithSeparators(ArrayRef<StringRef> Parts,
2445 StringRef FirstSeparator,
2446 StringRef Separator) {
2447 SmallString<128> Buffer;
2448 llvm::raw_svector_ostream OS(Buffer);
2449 StringRef Sep = FirstSeparator;
2450 for (StringRef Part : Parts) {
2451 OS << Sep << Part;
2452 Sep = Separator;
2453 }
2454 return OS.str().str();
2455}
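A worked example of the helper above, with illustrative inputs (the OMPBuilder name is assumed):

// Illustrative only: with FirstSeparator "." and Separator ".", the parts are
// joined with a leading dot.
std::string LockName = OMPBuilder.getNameWithSeparators(
    {"gomp_critical_user_foo", "var"}, ".", ".");
// LockName == ".gomp_critical_user_foo.var", the name that
// getOMPCriticalRegionLock below requests from getOrCreateOMPInternalVariable.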
2456
2457Constant *OpenMPIRBuilder::getOrCreateOMPInternalVariable(
2458 llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
2459 // TODO: Replace the Twine arg with a StringRef to get rid of the conversion
2460 // logic. However, this is taken from the current implementation in clang as is.
2461 // Since this method is used in many places exclusively for OMP internal use,
2462 // we will keep it as is temporarily until we move all users to the builder
2463 // and then, if possible, fix it everywhere in one go.
2464 SmallString<256> Buffer;
2465 llvm::raw_svector_ostream Out(Buffer);
2466 Out << Name;
2467 StringRef RuntimeName = Out.str();
2468 auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2469 if (Elem.second) {
2470 assert(Elem.second->getType()->getPointerElementType() == Ty &&
2471 "OMP internal variable has different type than requested");
2472 } else {
2473 // TODO: investigate the appropriate linkage type used for the global
2474 // variable for possibly changing that to internal or private, or maybe
2475 // create different versions of the function for different OMP internal
2476 // variables.
2477 Elem.second = new llvm::GlobalVariable(
2478 M, Ty, /*IsConstant*/ false, llvm::GlobalValue::CommonLinkage,
2479 llvm::Constant::getNullValue(Ty), Elem.first(),
2480 /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
2481 AddressSpace);
2482 }
2483
2484 return Elem.second;
2485}
2486
2487Value *OpenMPIRBuilder::getOMPCriticalRegionLock(StringRef CriticalName) {
2488 std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2489 std::string Name = getNameWithSeparators({Prefix, "var"}, ".", ".");
2490 return getOrCreateOMPInternalVariable(KmpCriticalNameTy, Name);
2491}
2492
2493GlobalVariable *
2494OpenMPIRBuilder::createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
2495 std::string VarName) {
2496 llvm::Constant *MaptypesArrayInit =
2497 llvm::ConstantDataArray::get(M.getContext(), Mappings);
2498 auto *MaptypesArrayGlobal = new llvm::GlobalVariable(
2499 M, MaptypesArrayInit->getType(),
2500 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MaptypesArrayInit,
2501 VarName);
2502 MaptypesArrayGlobal->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2503 return MaptypesArrayGlobal;
2504}
2505
2506void OpenMPIRBuilder::createMapperAllocas(const LocationDescription &Loc,
2507 InsertPointTy AllocaIP,
2508 unsigned NumOperands,
2509 struct MapperAllocas &MapperAllocas) {
2510 if (!updateToLocation(Loc))
1. Taking false branch
2511 return;
2512
2513 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
2514 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
2515 Builder.restoreIP(AllocaIP);
2. Calling 'IRBuilderBase::restoreIP'
7. Returning from 'IRBuilderBase::restoreIP'
2516 AllocaInst *ArgsBase = Builder.CreateAlloca(ArrI8PtrTy);
8. Calling 'IRBuilderBase::CreateAlloca'
2517 AllocaInst *Args = Builder.CreateAlloca(ArrI8PtrTy);
2518 AllocaInst *ArgSizes = Builder.CreateAlloca(ArrI64Ty);
2519 Builder.restoreIP(Loc.IP);
2520 MapperAllocas.ArgsBase = ArgsBase;
2521 MapperAllocas.Args = Args;
2522 MapperAllocas.ArgSizes = ArgSizes;
2523}
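The path annotations above (steps 1, 2, 7, 8) lead from updateToLocation into Builder.restoreIP(AllocaIP) and the first CreateAlloca; the report's warning ("Called C++ object pointer is null" at IRBuilder.h:1652) appears to point at the insertion block, which restoreIP leaves null when AllocaIP was never set. A minimal defensive sketch, assuming the same InsertPointTy::isSet() query used in createCopyinClauseBlocks above; this is illustrative only, not the upstream fix:

// Illustrative only (assumed guard): refuse to restore an insertion point
// whose block is null before emitting the mapper allocas.
if (!AllocaIP.isSet())
  return;
Builder.restoreIP(AllocaIP);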
2524
2525void OpenMPIRBuilder::emitMapperCall(const LocationDescription &Loc,
2526 Function *MapperFunc, Value *SrcLocInfo,
2527 Value *MaptypesArg, Value *MapnamesArg,
2528 struct MapperAllocas &MapperAllocas,
2529 int64_t DeviceID, unsigned NumOperands) {
2530 if (!updateToLocation(Loc))
2531 return;
2532
2533 auto *ArrI8PtrTy = ArrayType::get(Int8Ptr, NumOperands);
2534 auto *ArrI64Ty = ArrayType::get(Int64, NumOperands);
2535 Value *ArgsBaseGEP =
2536 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.ArgsBase,
2537 {Builder.getInt32(0), Builder.getInt32(0)});
2538 Value *ArgsGEP =
2539 Builder.CreateInBoundsGEP(ArrI8PtrTy, MapperAllocas.Args,
2540 {Builder.getInt32(0), Builder.getInt32(0)});
2541 Value *ArgSizesGEP =
2542 Builder.CreateInBoundsGEP(ArrI64Ty, MapperAllocas.ArgSizes,
2543 {Builder.getInt32(0), Builder.getInt32(0)});
2544 Value *NullPtr = Constant::getNullValue(Int8Ptr->getPointerTo());
2545 Builder.CreateCall(MapperFunc,
2546 {SrcLocInfo, Builder.getInt64(DeviceID),
2547 Builder.getInt32(NumOperands), ArgsBaseGEP, ArgsGEP,
2548 ArgSizesGEP, MaptypesArg, MapnamesArg, NullPtr});
2549}
2550
2551bool OpenMPIRBuilder::checkAndEmitFlushAfterAtomic(
2552 const LocationDescription &Loc, llvm::AtomicOrdering AO, AtomicKind AK) {
2553 assert(!(AO == AtomicOrdering::NotAtomic ||
2554 AO == llvm::AtomicOrdering::Unordered) &&
2555 "Unexpected Atomic Ordering.");
2556
2557 bool Flush = false;
2558 llvm::AtomicOrdering FlushAO = AtomicOrdering::Monotonic;
2559
2560 switch (AK) {
2561 case Read:
2562 if (AO == AtomicOrdering::Acquire || AO == AtomicOrdering::AcquireRelease ||
2563 AO == AtomicOrdering::SequentiallyConsistent) {
2564 FlushAO = AtomicOrdering::Acquire;
2565 Flush = true;
2566 }
2567 break;
2568 case Write:
2569 case Update:
2570 if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease ||
2571 AO == AtomicOrdering::SequentiallyConsistent) {
2572 FlushAO = AtomicOrdering::Release;
2573 Flush = true;
2574 }
2575 break;
2576 case Capture:
2577 switch (AO) {
2578 case AtomicOrdering::Acquire:
2579 FlushAO = AtomicOrdering::Acquire;
2580 Flush = true;
2581 break;
2582 case AtomicOrdering::Release:
2583 FlushAO = AtomicOrdering::Release;
2584 Flush = true;
2585 break;
2586 case AtomicOrdering::AcquireRelease:
2587 case AtomicOrdering::SequentiallyConsistent:
2588 FlushAO = AtomicOrdering::AcquireRelease;
2589 Flush = true;
2590 break;
2591 default:
2592 // do nothing - leave silently.
2593 break;
2594 }
2595 }
2596
2597 if (Flush) {
2598 // The flush runtime call does not yet take a memory ordering, so until it
2599 // does we still resolve which atomic ordering to use here but issue the
2600 // flush call without it.
2601 // TODO: pass `FlushAO` after memory ordering support is added
2602 (void)FlushAO;
2603 emitFlush(Loc);
2604 }
2605
2606 // For AO == AtomicOrdering::Monotonic and all other combinations of ordering
2607 // and kind, do nothing.
2608 return Flush;
2609}
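For quick reference, a compact restatement of the mapping implemented by the switch above (derived from the code; no behavior beyond it is implied):

//   Read    : acquire, acq_rel, seq_cst                -> acquire flush
//   Write   : release, acq_rel, seq_cst                -> release flush
//   Update  : release, acq_rel, seq_cst                -> release flush
//   Capture : acquire -> acquire, release -> release,
//             acq_rel / seq_cst -> acq_rel flush
//   Any other combination (e.g. monotonic): no flush, the function returns false.
// Note that FlushAO is currently only computed; emitFlush(Loc) is issued
// without a memory ordering.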
2610
2611OpenMPIRBuilder::InsertPointTy
2612OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
2613 AtomicOpValue &X, AtomicOpValue &V,
2614 AtomicOrdering AO) {
2615 if (!updateToLocation(Loc))
2616 return Loc.IP;
2617
2618 Type *XTy = X.Var->getType();
2619 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
2620 Type *XElemTy = XTy->getPointerElementType();
2621 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2622 XElemTy->isPointerTy()) &&
2623 "OMP atomic read expected a scalar type");
2624
2625 Value *XRead = nullptr;
2626
2627 if (XElemTy->isIntegerTy()) {
2628 LoadInst *XLD =
2629 Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
2630 XLD->setAtomic(AO);
2631 XRead = cast<Value>(XLD);
2632 } else {
2633 // We need to bitcast and perform atomic op as integer
2634 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
2635 IntegerType *IntCastTy =
2636 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2637 Value *XBCast = Builder.CreateBitCast(
2638 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.src.int.cast");
2639 LoadInst *XLoad =
2640 Builder.CreateLoad(IntCastTy, XBCast, X.IsVolatile, "omp.atomic.load");
2641 XLoad->setAtomic(AO);
2642 if (XElemTy->isFloatingPointTy()) {
2643 XRead = Builder.CreateBitCast(XLoad, XElemTy, "atomic.flt.cast");
2644 } else {
2645 XRead = Builder.CreateIntToPtr(XLoad, XElemTy, "atomic.ptr.cast");
2646 }
2647 }
2648 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Read);
2649 Builder.CreateStore(XRead, V.Var, V.IsVolatile);
2650 return Builder.saveIP();
2651}
2652
2653OpenMPIRBuilder::InsertPointTy
2654OpenMPIRBuilder::createAtomicWrite(const LocationDescription &Loc,
2655 AtomicOpValue &X, Value *Expr,
2656 AtomicOrdering AO) {
2657 if (!updateToLocation(Loc))
2658 return Loc.IP;
2659
2660 Type *XTy = X.Var->getType();
2661 assert(XTy->isPointerTy() && "OMP Atomic expects a pointer to target memory");
2662 Type *XElemTy = XTy->getPointerElementType();
2663 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2664 XElemTy->isPointerTy()) &&
2665 "OMP atomic write expected a scalar type");
2666
2667 if (XElemTy->isIntegerTy()) {
2668 StoreInst *XSt = Builder.CreateStore(Expr, X.Var, X.IsVolatile);
2669 XSt->setAtomic(AO);
2670 } else {
2671 // We need to bitcast and perform atomic op as integers
2672 unsigned Addrspace = cast<PointerType>(XTy)->getAddressSpace();
2673 IntegerType *IntCastTy =
2674 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2675 Value *XBCast = Builder.CreateBitCast(
2676 X.Var, IntCastTy->getPointerTo(Addrspace), "atomic.dst.int.cast");
2677 Value *ExprCast =
2678 Builder.CreateBitCast(Expr, IntCastTy, "atomic.src.int.cast");
2679 StoreInst *XSt = Builder.CreateStore(ExprCast, XBCast, X.IsVolatile);
2680 XSt->setAtomic(AO);
2681 }
2682
2683 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Write);
2684 return Builder.saveIP();
2685}
2686
2687OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicUpdate(
2688 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
2689 Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2690 AtomicUpdateCallbackTy &UpdateOp, bool IsXLHSInRHSPart) {
2691 if (!updateToLocation(Loc))
2692 return Loc.IP;
2693
2694 LLVM_DEBUG({
2695 Type *XTy = X.Var->getType();
2696 assert(XTy->isPointerTy() &&
2697 "OMP Atomic expects a pointer to target memory");
2698 Type *XElemTy = XTy->getPointerElementType();
2699 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2700 XElemTy->isPointerTy()) &&
2701 "OMP atomic update expected a scalar type");
2702 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
2703 (RMWOp != AtomicRMWInst::UMax) && (RMWOp != AtomicRMWInst::UMin) &&
2704 "OpenMP atomic does not support LT or GT operations");
2705 });
2706
2707 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, RMWOp, UpdateOp, X.IsVolatile,
2708 IsXLHSInRHSPart);
2709 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Update);
2710 return Builder.saveIP();
2711}
2712
2713Value *OpenMPIRBuilder::emitRMWOpAsInstruction(Value *Src1, Value *Src2,
2714 AtomicRMWInst::BinOp RMWOp) {
2715 switch (RMWOp) {
2716 case AtomicRMWInst::Add:
2717 return Builder.CreateAdd(Src1, Src2);
2718 case AtomicRMWInst::Sub:
2719 return Builder.CreateSub(Src1, Src2);
2720 case AtomicRMWInst::And:
2721 return Builder.CreateAnd(Src1, Src2);
2722 case AtomicRMWInst::Nand:
2723 return Builder.CreateNeg(Builder.CreateAnd(Src1, Src2));
2724 case AtomicRMWInst::Or:
2725 return Builder.CreateOr(Src1, Src2);
2726 case AtomicRMWInst::Xor:
2727 return Builder.CreateXor(Src1, Src2);
2728 case AtomicRMWInst::Xchg:
2729 case AtomicRMWInst::FAdd:
2730 case AtomicRMWInst::FSub:
2731 case AtomicRMWInst::BAD_BINOP:
2732 case AtomicRMWInst::Max:
2733 case AtomicRMWInst::Min:
2734 case AtomicRMWInst::UMax:
2735 case AtomicRMWInst::UMin:
2736 llvm_unreachable("Unsupported atomic update operation");
2737 }
2738 llvm_unreachable("Unsupported atomic update operation");
2739}
2740
2741std::pair<Value *, Value *>
2742OpenMPIRBuilder::emitAtomicUpdate(Instruction *AllocIP, Value *X, Value *Expr,
2743 AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
2744 AtomicUpdateCallbackTy &UpdateOp,
2745 bool VolatileX, bool IsXLHSInRHSPart) {
2746 Type *XElemTy = X->getType()->getPointerElementType();
2747
2748 bool DoCmpExch =
2749 ((RMWOp == AtomicRMWInst::BAD_BINOP) || (RMWOp == AtomicRMWInst::FAdd)) ||
2750 (RMWOp == AtomicRMWInst::FSub) ||
2751 (RMWOp == AtomicRMWInst::Sub && !IsXLHSInRHSPart);
2752
2753 std::pair<Value *, Value *> Res;
2754 if (XElemTy->isIntegerTy() && !DoCmpExch) {
2755 Res.first = Builder.CreateAtomicRMW(RMWOp, X, Expr, llvm::MaybeAlign(), AO);
2756 // Not needed except in the case of postfix captures. Generate it anyway for
2757 // consistency with the else part; any DCE pass will remove it.
2758 Res.second = emitRMWOpAsInstruction(Res.first, Expr, RMWOp);
2759 } else {
2760 unsigned Addrspace = cast<PointerType>(X->getType())->getAddressSpace();
2761 IntegerType *IntCastTy =
2762 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2763 Value *XBCast =
2764 Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
2765 LoadInst *OldVal =
2766 Builder.CreateLoad(IntCastTy, XBCast, X->getName() + ".atomic.load");
2767 OldVal->setAtomic(AO);
2768 // CurBB
2769 // | /---\
2770 // ContBB |
2771 // | \---/
2772 // ExitBB
2773 BasicBlock *CurBB = Builder.GetInsertBlock();
2774 Instruction *CurBBTI = CurBB->getTerminator();
2775 CurBBTI = CurBBTI ? CurBBTI : Builder.CreateUnreachable();
2776 BasicBlock *ExitBB =
2777 CurBB->splitBasicBlock(CurBBTI, X->getName() + ".atomic.exit");
2778 BasicBlock *ContBB = CurBB->splitBasicBlock(CurBB->getTerminator(),
2779 X->getName() + ".atomic.cont");
2780 ContBB->getTerminator()->eraseFromParent();
2781 Builder.SetInsertPoint(ContBB);
2782 llvm::PHINode *PHI = Builder.CreatePHI(OldVal->getType(), 2);
2783 PHI->addIncoming(OldVal, CurBB);
2784 AllocaInst *NewAtomicAddr = Builder.CreateAlloca(XElemTy);
2785 NewAtomicAddr->setName(X->getName() + "x.new.val");
2786 NewAtomicAddr->moveBefore(AllocIP);
2787 IntegerType *NewAtomicCastTy =
2788 IntegerType::get(M.getContext(), XElemTy->getScalarSizeInBits());
2789 bool IsIntTy = XElemTy->isIntegerTy();
2790 Value *NewAtomicIntAddr =
2791 (IsIntTy)
2792 ? NewAtomicAddr
2793 : Builder.CreateBitCast(NewAtomicAddr,
2794 NewAtomicCastTy->getPointerTo(Addrspace));
2795 Value *OldExprVal = PHI;
2796 if (!IsIntTy) {
2797 if (XElemTy->isFloatingPointTy()) {
2798 OldExprVal = Builder.CreateBitCast(PHI, XElemTy,
2799 X->getName() + ".atomic.fltCast");
2800 } else {
2801 OldExprVal = Builder.CreateIntToPtr(PHI, XElemTy,
2802 X->getName() + ".atomic.ptrCast");
2803 }
2804 }
2805
2806 Value *Upd = UpdateOp(OldExprVal, Builder);
2807 Builder.CreateStore(Upd, NewAtomicAddr);
2808 LoadInst *DesiredVal = Builder.CreateLoad(XElemTy, NewAtomicIntAddr);
2809 Value *XAddr =
2810 (IsIntTy)
2811 ? X
2812 : Builder.CreateBitCast(X, IntCastTy->getPointerTo(Addrspace));
2813 AtomicOrdering Failure =
2814 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
2815 AtomicCmpXchgInst *Result = Builder.CreateAtomicCmpXchg(
2816 XAddr, OldExprVal, DesiredVal, llvm::MaybeAlign(), AO, Failure);
2817 Result->setVolatile(VolatileX);
2818 Value *PreviousVal = Builder.CreateExtractValue(Result, /*Idxs=*/0);
2819 Value *SuccessFailureVal = Builder.CreateExtractValue(Result, /*Idxs=*/1);
2820 PHI->addIncoming(PreviousVal, Builder.GetInsertBlock());
2821 Builder.CreateCondBr(SuccessFailureVal, ExitBB, ContBB);
2822
2823 Res.first = OldExprVal;
2824 Res.second = Upd;
2825
2826 // set Insertion point in exit block
2827 if (UnreachableInst *ExitTI =
2828 dyn_cast<UnreachableInst>(ExitBB->getTerminator())) {
2829 CurBBTI->eraseFromParent();
2830 Builder.SetInsertPoint(ExitBB);
2831 } else {
2832 Builder.SetInsertPoint(ExitTI);
2833 }
2834 }
2835
2836 return Res;
2837}
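The integer fast path above maps onto a single atomicrmw; the general path builds a compare-exchange retry loop. Below is a minimal sketch of that same retry-loop pattern written directly against a plain IRBuilder<> for a float addend; every name in it (emitFloatAtomicAddSketch, Addr, Incr) is an illustrative assumption and not part of OpenMPIRBuilder, and orderings are fixed to Monotonic for brevity.

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // Sketch only: the cmpxchg retry loop emitted above, assuming Addr is a
  // float* and Incr a float value. B must already be positioned in a function.
  static void emitFloatAtomicAddSketch(IRBuilder<> &B, Value *Addr, Value *Incr) {
    LLVMContext &Ctx = B.getContext();
    Function *F = B.GetInsertBlock()->getParent();
    Type *FloatTy = Type::getFloatTy(Ctx);
    Type *I32Ty = Type::getInt32Ty(Ctx);
    unsigned AS = cast<PointerType>(Addr->getType())->getAddressSpace();

    // Load the current value through an integer view, as the code above does.
    Value *IntAddr = B.CreateBitCast(Addr, I32Ty->getPointerTo(AS));
    LoadInst *Old = B.CreateLoad(I32Ty, IntAddr, "atomic.load");
    Old->setAtomic(AtomicOrdering::Monotonic);

    BasicBlock *Cur = B.GetInsertBlock();
    BasicBlock *Cont = BasicBlock::Create(Ctx, "atomic.cont", F);
    BasicBlock *Exit = BasicBlock::Create(Ctx, "atomic.exit", F);
    B.CreateBr(Cont);

    // Retry block: recompute the desired value from the last observed one and
    // try to publish it; loop until the cmpxchg succeeds.
    B.SetInsertPoint(Cont);
    PHINode *Phi = B.CreatePHI(I32Ty, 2);
    Phi->addIncoming(Old, Cur);
    Value *OldF = B.CreateBitCast(Phi, FloatTy);
    Value *NewF = B.CreateFAdd(OldF, Incr);
    Value *NewI = B.CreateBitCast(NewF, I32Ty);
    AtomicCmpXchgInst *CX = B.CreateAtomicCmpXchg(
        IntAddr, Phi, NewI, MaybeAlign(), AtomicOrdering::Monotonic,
        AtomicOrdering::Monotonic);
    Phi->addIncoming(B.CreateExtractValue(CX, 0), B.GetInsertBlock());
    B.CreateCondBr(B.CreateExtractValue(CX, 1), Exit, Cont);

    B.SetInsertPoint(Exit);
  }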
2838
2839OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::createAtomicCapture(
2840 const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X,
2841 AtomicOpValue &V, Value *Expr, AtomicOrdering AO,
2842 AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp,
2843 bool UpdateExpr, bool IsPostfixUpdate, bool IsXLHSInRHSPart) {
2844 if (!updateToLocation(Loc))
2845 return Loc.IP;
2846
2847 LLVM_DEBUG({
2848 Type *XTy = X.Var->getType();
2849 assert(XTy->isPointerTy() &&
2850 "OMP Atomic expects a pointer to target memory");
2851 Type *XElemTy = XTy->getPointerElementType();
2852 assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
2853 XElemTy->isPointerTy()) &&
2854 "OMP atomic capture expected a scalar type");
2855 assert((RMWOp != AtomicRMWInst::Max) && (RMWOp != AtomicRMWInst::Min) &&
2856 "OpenMP atomic does not support LT or GT operations");
2857 });
2858
2859 // If UpdateExpr is 'x' updated with some `expr` not based on 'x',
2860 // 'x' is simply atomically rewritten with 'expr'.
2861 AtomicRMWInst::BinOp AtomicOp = (UpdateExpr ? RMWOp : AtomicRMWInst::Xchg);
2862 std::pair<Value *, Value *> Result =
2863 emitAtomicUpdate(AllocIP, X.Var, Expr, AO, AtomicOp, UpdateOp,
2864 X.IsVolatile, IsXLHSInRHSPart);
2865
2866 Value *CapturedVal = (IsPostfixUpdate ? Result.first : Result.second);
2867 Builder.CreateStore(CapturedVal, V.Var, V.IsVolatile);
2868
2869 checkAndEmitFlushAfterAtomic(Loc, AO, AtomicKind::Capture);
2870 return Builder.saveIP();
2871}
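For orientation, a hedged fragment showing how a frontend might drive this entry point for the capture form `v = x; x = x + 1;`. Only the AtomicOpValue fields used above (Var, IsVolatile) are populated; OMPBuilder, Builder, Loc, AllocaIP, XPtr and VPtr are assumed to exist in the caller, and the callback signature is inferred from the UpdateOp(OldExprVal, Builder) call in emitAtomicUpdate.

  // Sketch, not verbatim clang codegen.
  OpenMPIRBuilder::AtomicOpValue X, V;
  X.Var = XPtr; // pointer to the atomically updated location
  V.Var = VPtr; // pointer receiving the captured value
  Value *One = Builder.getInt32(1);

  auto UpdateOp = [&](Value *XOld, IRBuilder<> &IRB) -> Value * {
    return IRB.CreateAdd(XOld, One); // x = x + 1 (callback signature assumed)
  };

  Builder.restoreIP(OMPBuilder.createAtomicCapture(
      Loc, AllocaIP, X, V, /*Expr=*/One, AtomicOrdering::Monotonic,
      AtomicRMWInst::Add, UpdateOp, /*UpdateExpr=*/true,
      /*IsPostfixUpdate=*/true, /*IsXLHSInRHSPart=*/true));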
2872
2873GlobalVariable *
2874OpenMPIRBuilder::createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
2875 std::string VarName) {
2876 llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
2877 llvm::ArrayType::get(
2878 llvm::Type::getInt8Ty(M.getContext())->getPointerTo(), Names.size()),
2879 Names);
2880 auto *MapNamesArrayGlobal = new llvm::GlobalVariable(
2881 M, MapNamesArrayInit->getType(),
2882 /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, MapNamesArrayInit,
2883 VarName);
2884 return MapNamesArrayGlobal;
2885}
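A short usage fragment: Names is expected to hold i8* constants, typically global string pointers. The map-name strings below are made up, and Builder must have an insertion point inside a function for CreateGlobalStringPtr to work.

  SmallVector<llvm::Constant *, 4> Names;
  Names.push_back(Builder.CreateGlobalStringPtr("a[0:64]"));
  Names.push_back(Builder.CreateGlobalStringPtr("b"));
  GlobalVariable *MapNames =
      OMPBuilder.createOffloadMapnames(Names, ".offload_mapnames");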
2886
2887// Create all simple and struct types exposed by the runtime and remember
2888// the llvm::PointerTypes of them for easy access later.
2889void OpenMPIRBuilder::initializeTypes(Module &M) {
2890 LLVMContext &Ctx = M.getContext();
2891 StructType *T;
2892#define OMP_TYPE(VarName, InitValue) VarName = InitValue;
2893#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
2894 VarName##Ty = ArrayType::get(ElemTy, ArraySize); \
2895 VarName##PtrTy = PointerType::getUnqual(VarName##Ty);
2896#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
2897 VarName = FunctionType::get(ReturnType, {__VA_ARGS__}, IsVarArg); \
2898 VarName##Ptr = PointerType::getUnqual(VarName);
2899#define OMP_STRUCT_TYPE(VarName, StructName, ...) \
2900 T = StructType::getTypeByName(Ctx, StructName); \
2901 if (!T) \
2902 T = StructType::create(Ctx, {__VA_ARGS__}, StructName); \
2903 VarName = T; \
2904 VarName##Ptr = PointerType::getUnqual(T);
2905#include "llvm/Frontend/OpenMP/OMPKinds.def"
2906}
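To see what the X-macro include does for one record, take a hypothetical entry OMP_STRUCT_TYPE(Ident, "struct.ident_t", Int32, Int32, Int32, Int32, Int8Ptr) (illustrative, not quoted from OMPKinds.def). The #include above then stamps out roughly:

  T = StructType::getTypeByName(Ctx, "struct.ident_t");
  if (!T)
    T = StructType::create(Ctx, {Int32, Int32, Int32, Int32, Int8Ptr},
                           "struct.ident_t");
  Ident = T;
  IdentPtr = PointerType::getUnqual(T);

so each runtime type ends up cached as a member together with its unqualified pointer type.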
2907
2908void OpenMPIRBuilder::OutlineInfo::collectBlocks(
2909 SmallPtrSetImpl<BasicBlock *> &BlockSet,
2910 SmallVectorImpl<BasicBlock *> &BlockVector) {
2911 SmallVector<BasicBlock *, 32> Worklist;
2912 BlockSet.insert(EntryBB);
2913 BlockSet.insert(ExitBB);
2914
2915 Worklist.push_back(EntryBB);
2916 while (!Worklist.empty()) {
2917 BasicBlock *BB = Worklist.pop_back_val();
2918 BlockVector.push_back(BB);
2919 for (BasicBlock *SuccBB : successors(BB))
2920 if (BlockSet.insert(SuccBB).second)
2921 Worklist.push_back(SuccBB);
2922 }
2923}
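Note that ExitBB is seeded into the set before the walk, so the traversal stops at the region exit instead of following its successors. A small usage fragment, assuming OI is an OutlineInfo whose EntryBB/ExitBB are already set:

  SmallPtrSet<BasicBlock *, 32> RegionBlockSet;
  SmallVector<BasicBlock *, 32> RegionBlocks;
  OI.collectBlocks(RegionBlockSet, RegionBlocks);
  for (BasicBlock *BB : RegionBlocks)
    LLVM_DEBUG(dbgs() << "outlined block: " << BB->getName() << "\n");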
2924
2925void CanonicalLoopInfo::collectControlBlocks(
2926 SmallVectorImpl<BasicBlock *> &BBs) {
2927 // We only count those BBs as control block for which we do not need to
2928 // reverse the CFG, i.e. not the loop body which can contain arbitrary control
2929 // flow. For consistency, this also means we do not add the Body block, which
2930 // is just the entry to the body code.
2931 BBs.reserve(BBs.size() + 6);
2932 BBs.append({Preheader, Header, Cond, Latch, Exit, After});
2933}
2934
2935void CanonicalLoopInfo::assertOK() const {
2936#ifndef NDEBUG
2937 // No constraints if this object currently does not describe a loop.
2938 if (!isValid())
2939 return;
2940
2941 // Verify standard control-flow we use for OpenMP loops.
2942 assert(Preheader);
2943 assert(isa<BranchInst>(Preheader->getTerminator()) &&
2944 "Preheader must terminate with unconditional branch");
2945 assert(Preheader->getSingleSuccessor() == Header &&
2946 "Preheader must jump to header");
2947
2948 assert(Header);
2949 assert(isa<BranchInst>(Header->getTerminator()) &&
2950 "Header must terminate with unconditional branch");
2951 assert(Header->getSingleSuccessor() == Cond &&
2952 "Header must jump to exiting block");
2953
2954 assert(Cond);
2955 assert(Cond->getSinglePredecessor() == Header &&
2956 "Exiting block only reachable from header");
2957
2958 assert(isa<BranchInst>(Cond->getTerminator()) &&
2959 "Exiting block must terminate with conditional branch");
2960 assert(size(successors(Cond)) == 2 &&
2961 "Exiting block must have two successors");
2962 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(0) == Body &&
2963 "Exiting block's first successor jump to the body");
2964 assert(cast<BranchInst>(Cond->getTerminator())->getSuccessor(1) == Exit &&
2965 "Exiting block's second successor must exit the loop");
2966
2967 assert(Body);
2968 assert(Body->getSinglePredecessor() == Cond &&
2969 "Body only reachable from exiting block");
2970 assert(!isa<PHINode>(Body->front()));
2971
2972 assert(Latch);
2973 assert(isa<BranchInst>(Latch->getTerminator()) &&
2974 "Latch must terminate with unconditional branch");
2975 assert(Latch->getSingleSuccessor() == Header && "Latch must jump to header");
2976 // TODO: To support simple redirecting of the end of the body code that has
2977 // multiple; introduce another auxiliary basic block like preheader and after.
2978 assert(Latch->getSinglePredecessor() != nullptr);
2979 assert(!isa<PHINode>(Latch->front()));
2980
2981 assert(Exit);
2982 assert(isa<BranchInst>(Exit->getTerminator()) &&
2983 "Exit block must terminate with unconditional branch");
2984 assert(Exit->getSingleSuccessor() == After &&
2985 "Exit block must jump to after block");
2986
2987 assert(After);
2988 assert(After->getSinglePredecessor() == Exit &&
2989 "After block only reachable from exit block");
2990 assert(After->empty() || !isa<PHINode>(After->front()));
2991
2992 Instruction *IndVar = getIndVar();
2993 assert(IndVar && "Canonical induction variable not found?");
2994 assert(isa<IntegerType>(IndVar->getType()) &&
2995 "Induction variable must be an integer");
2996 assert(cast<PHINode>(IndVar)->getParent() == Header &&
2997 "Induction variable must be a PHI in the loop header");
2998 assert(cast<PHINode>(IndVar)->getIncomingBlock(0) == Preheader);
2999 assert(
3000 cast<ConstantInt>(cast<PHINode>(IndVar)->getIncomingValue(0))->isZero());
3001 assert(cast<PHINode>(IndVar)->getIncomingBlock(1) == Latch);
3002
3003 auto *NextIndVar = cast<PHINode>(IndVar)->getIncomingValue(1);
3004 assert(cast<Instruction>(NextIndVar)->getParent() == Latch);
3005 assert(cast<BinaryOperator>(NextIndVar)->getOpcode() == BinaryOperator::Add);
3006 assert(cast<BinaryOperator>(NextIndVar)->getOperand(0) == IndVar);
3007 assert(cast<ConstantInt>(cast<BinaryOperator>(NextIndVar)->getOperand(1))
3008 ->isOne());
3009
3010 Value *TripCount = getTripCount();
3011 assert(TripCount && "Loop trip count not found?");
3012 assert(IndVar->getType() == TripCount->getType() &&
3013 "Trip count and induction variable must have the same type");
3014
3015 auto *CmpI = cast<CmpInst>(&Cond->front());
3016 assert(CmpI->getPredicate() == CmpInst::ICMP_ULT &&
3017 "Exit condition must be a signed less-than comparison");
3018 assert(CmpI->getOperand(0) == IndVar &&
3019 "Exit condition must compare the induction variable");
3020 assert(CmpI->getOperand(1) == TripCount &&
3021 "Exit condition must compare with the trip count");
3022#endif
3023}
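Taken together, the assertions above pin down the following canonical shape (reconstructed from the checks; i is the induction PHI, n the trip count):

  Preheader -> Header          i = phi [0, Preheader], [i.next, Latch]
  Header    -> Cond            unconditional
  Cond      -> Body | Exit     br (icmp ult i, n), Body, Exit
  Body ...  -> Latch           arbitrary body control flow, single latch predecessor
  Latch     -> Header          i.next = i + 1, back edge
  Exit      -> After           unconditional

So the induction variable starts at 0, steps by 1, and the loop keeps iterating while i u< n.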
3024
3025void CanonicalLoopInfo::invalidate() {
3026 Preheader = nullptr;
3027 Header = nullptr;
3028 Cond = nullptr;
3029 Body = nullptr;
3030 Latch = nullptr;
3031 Exit = nullptr;
3032 After = nullptr;
3033}

/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
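A typical use of the callback inserter is to observe every instruction a builder creates, for example to feed a worklist. A minimal fragment, with Ctx, SomeBB, X and Y assumed to exist:

  SmallVector<Instruction *, 16> Created;
  IRBuilder<ConstantFolder, IRBuilderCallbackInserter> B(
      Ctx, ConstantFolder(),
      IRBuilderCallbackInserter([&](Instruction *I) { Created.push_back(I); }));
  B.SetInsertPoint(SomeBB);
  B.CreateAdd(X, Y); // the callback runs right after the default insertion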
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V));
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
5. Null pointer value stored to field 'BB'
175 InsertPt = BasicBlock::iterator();
176 }
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
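In short, a builder can be pointed at the end of a block, before a given instruction (inheriting that instruction's debug location), or at an arbitrary iterator. Fragment sketch, with Ctx, BB and I assumed:

  IRBuilder<> B(Ctx);
  B.SetInsertPoint(BB);                            // append at the end of BB
  B.SetInsertPoint(I);                             // insert before I, copies I's !dbg
  B.SetInsertPoint(BB, BB->getFirstInsertionPt()); // insert at a specific iterator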
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MetadataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
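Fragment sketch of the usual pattern: mirror OrigInst's debug location onto everything the builder creates next (OrigInst, LHS, RHS assumed):

  B.CollectMetadataToCopy(OrigInst, {LLVMContext::MD_dbg});
  Value *Sum = B.CreateAdd(LHS, RHS); // Sum carries OrigInst's !dbg, if any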
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
3. Taking false branch
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
4. Calling 'IRBuilderBase::ClearInsertionPoint'
6. Returning from 'IRBuilderBase::ClearInsertionPoint'
288 }
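A minimal caller-side sketch of the saveIP()/restoreIP() pairing (the function and the TmpBB/LHS/RHS names are hypothetical). Note that if the saved InsertPoint was captured while the builder had no insertion point, restoreIP() instead calls ClearInsertionPoint(), which is the path taken in steps 3-6 above.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Emit one instruction into TmpBB, then return to the previous position.
Value *emitAddIn(IRBuilderBase &Builder, BasicBlock *TmpBB, Value *LHS, Value *RHS) {
  IRBuilderBase::InsertPoint Saved = Builder.saveIP(); // remembers block + point
  Builder.SetInsertPoint(TmpBB);                       // append at the end of TmpBB
  Value *Sum = Builder.CreateAdd(LHS, RHS, "sum");
  Builder.restoreIP(Saved);                            // back to the saved spot
  return Sum;
}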
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
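A hedged sketch of switching a builder into strict FP mode with the setters above, so the CreateF&lt;op&gt;() calls later in this header emit constrained intrinsics (function and value names hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *strictFAdd(IRBuilderBase &B, Value *X, Value *Y) {
  B.setIsFPConstrained(true);                                       // use constrained intrinsics
  B.setDefaultConstrainedExcept(fp::ebStrict);                      // FP exceptions are observable
  B.setDefaultConstrainedRounding(RoundingMode::NearestTiesToEven); // default rounding
  return B.CreateFAdd(X, Y, "strict.add");  // llvm.experimental.constrained.fadd
}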
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!");
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addFnAttr(Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
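A minimal sketch of the RAII pattern InsertPointGuard implements, assuming a builder B and a block EntryBB (the surrounding function is hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void withTemporaryEntryInsert(IRBuilderBase &B, BasicBlock &EntryBB) {
  IRBuilderBase::InsertPointGuard Guard(B);                  // snapshots block, point, debug loc
  B.SetInsertPoint(&EntryBB, EntryBB.getFirstInsertionPt()); // move to the entry block
  B.CreateAlloca(B.getInt32Ty(), nullptr, "tmp");
}                                                            // guard restores everything here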
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
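A hedged sketch of scoping fast-math flags with FastMathFlagGuard; the previous flags, FP math tag and constrained-FP state return when the guard is destroyed (names hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *fastFAdd(IRBuilderBase &B, Value *X, Value *Y) {
  IRBuilderBase::FastMathFlagGuard FMFGuard(B); // save current fast-math settings
  FastMathFlags FMF;
  FMF.setFast();                                // reassoc, nnan, ninf, nsz, ...
  B.setFastMathFlags(FMF);
  return B.CreateFAdd(X, Y, "fast.add");        // created with the scoped flags
}                                               // previous settings restored here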
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is taken from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
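A small usage sketch; when no module is passed, the global is created in the module of the current insertion block, so the builder must have one set (the helper name is hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

GlobalVariable *emitFormatString(IRBuilderBase &B) {
  // Private unnamed_addr constant [4 x i8] c"%d\0A\00" in the current module.
  return B.CreateGlobalString("%d\n", "fmt");
}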
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
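A quick sketch of the constant getters; each returns a ConstantInt of the named width in the builder's context (names hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void makeConstants(IRBuilderBase &B) {
  ConstantInt *True   = B.getTrue();             // i1 true
  ConstantInt *Byte   = B.getInt8(0x7f);         // i8 127
  ConstantInt *Word   = B.getInt32(42);          // i32 42
  ConstantInt *Narrow = B.getIntN(24, 7);        // i24 7, zext/trunc from a 64-bit value
  ConstantInt *Wide   = B.getInt(APInt(128, 1)); // arbitrary width via APInt
  (void)True; (void)Byte; (void)Word; (void)Narrow; (void)Wide;
}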
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
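The type getters are thin wrappers over Type::get*Ty with the builder's context; only getIntPtrTy needs a DataLayout, since pointer width is target dependent. A hedged sketch (names hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void pickTypes(IRBuilderBase &B, const DataLayout &DL) {
  IntegerType *I64    = B.getInt64Ty();
  Type        *F64    = B.getDoubleTy();
  PointerType *I8Ptr  = B.getInt8PtrTy();  // address space 0 by default
  IntegerType *IntPtr = B.getIntPtrTy(DL); // pointer-sized integer for this target
  (void)I64; (void)F64; (void)I8Ptr; (void)IntPtr;
}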
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
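The uint64_t-size CreateMemCpy above wraps the Value*-size overload through getInt64, which goes through CreateMemTransferInst with Intrinsic::memcpy. A hedged caller sketch (names hypothetical; Dst and Src assumed convertible to i8*):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

CallInst *copy16Bytes(IRBuilderBase &B, Value *Dst, Value *Src) {
  return B.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), /*Size=*/16);
}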
649
650 CallInst *
651 CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
652 MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
653 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
654 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
655
656 /// Create and insert an element unordered-atomic memcpy between the
657 /// specified pointers.
658 ///
659 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
660 ///
661 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
662 /// specified, it will be added to the instruction. Likewise with alias.scope
663 /// and noalias tags.
664 CallInst *CreateElementUnorderedAtomicMemCpy(
665 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
666 uint32_t ElementSize, MDNode *TBAATag = nullptr,
667 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
668 MDNode *NoAliasTag = nullptr);
669
670 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
671 MaybeAlign SrcAlign, uint64_t Size,
672 bool isVolatile = false, MDNode *TBAATag = nullptr,
673 MDNode *ScopeTag = nullptr,
674 MDNode *NoAliasTag = nullptr) {
675 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
676 isVolatile, TBAATag, ScopeTag, NoAliasTag);
677 }
678
679 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
680 MaybeAlign SrcAlign, Value *Size,
681 bool isVolatile = false, MDNode *TBAATag = nullptr,
682 MDNode *ScopeTag = nullptr,
683 MDNode *NoAliasTag = nullptr);
684
685 /// \brief Create and insert an element unordered-atomic memmove between the
686 /// specified pointers.
687 ///
688 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
689 /// respectively.
690 ///
691 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
692 /// specified, it will be added to the instruction. Likewise with alias.scope
693 /// and noalias tags.
694 CallInst *CreateElementUnorderedAtomicMemMove(
695 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
696 uint32_t ElementSize, MDNode *TBAATag = nullptr,
697 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
698 MDNode *NoAliasTag = nullptr);
699
700 /// Create a sequential vector fadd reduction intrinsic of the source vector.
701 /// The first parameter is a scalar accumulator value. An unordered reduction
702 /// can be created by adding the reassoc fast-math flag to the resulting
703 /// sequential reduction.
704 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
705
706 /// Create a sequential vector fmul reduction intrinsic of the source vector.
707 /// The first parameter is a scalar accumulator value. An unordered reduction
708 /// can be created by adding the reassoc fast-math flag to the resulting
709 /// sequential reduction.
710 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
711
712 /// Create a vector int add reduction intrinsic of the source vector.
713 CallInst *CreateAddReduce(Value *Src);
714
715 /// Create a vector int mul reduction intrinsic of the source vector.
716 CallInst *CreateMulReduce(Value *Src);
717
718 /// Create a vector int AND reduction intrinsic of the source vector.
719 CallInst *CreateAndReduce(Value *Src);
720
721 /// Create a vector int OR reduction intrinsic of the source vector.
722 CallInst *CreateOrReduce(Value *Src);
723
724 /// Create a vector int XOR reduction intrinsic of the source vector.
725 CallInst *CreateXorReduce(Value *Src);
726
727 /// Create a vector integer max reduction intrinsic of the source
728 /// vector.
729 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
730
731 /// Create a vector integer min reduction intrinsic of the source
732 /// vector.
733 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
734
735 /// Create a vector float max reduction intrinsic of the source
736 /// vector.
737 CallInst *CreateFPMaxReduce(Value *Src);
738
739 /// Create a vector float min reduction intrinsic of the source
740 /// vector.
741 CallInst *CreateFPMinReduce(Value *Src);
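A hedged sketch of the reduction helpers declared above; the integer forms take only the source vector, while the fadd/fmul forms also take a scalar accumulator and stay ordered unless the reassoc flag is added to the result (names hypothetical):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *sumElements(IRBuilderBase &B, Value *IntVec) {
  return B.CreateAddReduce(IntVec);       // llvm.vector.reduce.add
}

Value *orderedFSum(IRBuilderBase &B, Value *Acc, Value *FltVec) {
  return B.CreateFAddReduce(Acc, FltVec); // sequential llvm.vector.reduce.fadd
}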
742
743 /// Create a lifetime.start intrinsic.
744 ///
745 /// If the pointer isn't i8* it will be converted.
746 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
747
748 /// Create a lifetime.end intrinsic.
749 ///
750 /// If the pointer isn't i8* it will be converted.
751 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
752
753 /// Create a call to invariant.start intrinsic.
754 ///
755 /// If the pointer isn't i8* it will be converted.
756 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
757
758 /// Create a call to Masked Load intrinsic
759 CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
760 Value *PassThru = nullptr, const Twine &Name = "");
761
762 /// Create a call to Masked Store intrinsic
763 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
764 Value *Mask);
765
766 /// Create a call to Masked Gather intrinsic
767 CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
768 Value *Mask = nullptr, Value *PassThru = nullptr,
769 const Twine &Name = "");
770
771 /// Create a call to Masked Scatter intrinsic
772 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
773 Value *Mask = nullptr);
774
775 /// Create an assume intrinsic call that allows the optimizer to
776 /// assume that the provided condition will be true.
777 ///
778 /// The optional argument \p OpBundles specifies operand bundles that are
779 /// added to the call instruction.
780 CallInst *CreateAssumption(Value *Cond,
781 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
782
783 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
784 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
785 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
786 return CreateNoAliasScopeDeclaration(
787 MetadataAsValue::get(Context, ScopeTag));
788 }
789
790 /// Create a call to the experimental.gc.statepoint intrinsic to
791 /// start a new statepoint sequence.
792 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
793 Value *ActualCallee,
794 ArrayRef<Value *> CallArgs,
795 Optional<ArrayRef<Value *>> DeoptArgs,
796 ArrayRef<Value *> GCArgs,
797 const Twine &Name = "");
798
799 /// Create a call to the experimental.gc.statepoint intrinsic to
800 /// start a new statepoint sequence.
801 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
802 Value *ActualCallee, uint32_t Flags,
803 ArrayRef<Value *> CallArgs,
804 Optional<ArrayRef<Use>> TransitionArgs,
805 Optional<ArrayRef<Use>> DeoptArgs,
806 ArrayRef<Value *> GCArgs,
807 const Twine &Name = "");
808
809 /// Convenience function for the common case when CallArgs are filled
810 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
811 /// .get()'ed to get the Value pointer.
812 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
813 Value *ActualCallee, ArrayRef<Use> CallArgs,
814 Optional<ArrayRef<Value *>> DeoptArgs,
815 ArrayRef<Value *> GCArgs,
816 const Twine &Name = "");
817
818 /// Create an invoke to the experimental.gc.statepoint intrinsic to
819 /// start a new statepoint sequence.
820 InvokeInst *
821 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
822 Value *ActualInvokee, BasicBlock *NormalDest,
823 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
824 Optional<ArrayRef<Value *>> DeoptArgs,
825 ArrayRef<Value *> GCArgs, const Twine &Name = "");
826
827 /// Create an invoke to the experimental.gc.statepoint intrinsic to
828 /// start a new statepoint sequence.
829 InvokeInst *CreateGCStatepointInvoke(
830 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
831 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
832 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
833 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
834 const Twine &Name = "");
835
836 // Convenience function for the common case when CallArgs are filled in using
837 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
838 // get the Value *.
839 InvokeInst *
840 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
841 Value *ActualInvokee, BasicBlock *NormalDest,
842 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
843 Optional<ArrayRef<Value *>> DeoptArgs,
844 ArrayRef<Value *> GCArgs, const Twine &Name = "");
845
846 /// Create a call to the experimental.gc.result intrinsic to extract
847 /// the result from a call wrapped in a statepoint.
848 CallInst *CreateGCResult(Instruction *Statepoint,
849 Type *ResultType,
850 const Twine &Name = "");
851
852 /// Create a call to the experimental.gc.relocate intrinsics to
853 /// project the relocated value of one pointer from the statepoint.
854 CallInst *CreateGCRelocate(Instruction *Statepoint,
855 int BaseOffset,
856 int DerivedOffset,
857 Type *ResultType,
858 const Twine &Name = "");
859
860 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
861 /// base pointer for the specified derived pointer.
862 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
863
864 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
865 /// the offset of the specified derived pointer from its base.
866 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
867
868 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
869 /// will be the same type as that of \p Scaling.
870 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
871
872 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
873 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
874
875 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
876 /// type.
877 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
878 Instruction *FMFSource = nullptr,
879 const Twine &Name = "");
880
881 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
882 /// first type.
883 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
884 Instruction *FMFSource = nullptr,
885 const Twine &Name = "");
886
887 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
888 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
889 /// the intrinsic.
890 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
891 ArrayRef<Value *> Args,
892 Instruction *FMFSource = nullptr,
893 const Twine &Name = "");
894
895 /// Create call to the minnum intrinsic.
896 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
897 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
898 }
899
900 /// Create call to the maxnum intrinsic.
901 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
902 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
903 }
904
905 /// Create call to the minimum intrinsic.
906 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
907 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
908 }
909
910 /// Create call to the maximum intrinsic.
911 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
912 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
913 }
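A small clamp built from the maxnum/minnum wrappers above (names hypothetical; X, Lo and Hi assumed to be floating-point values of the same type):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *clampToRange(IRBuilderBase &B, Value *X, Value *Lo, Value *Hi) {
  return B.CreateMinNum(B.CreateMaxNum(X, Lo, "lo.clamped"), Hi, "clamped");
}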
914
915 /// Create a call to the arithmetic_fence intrinsic.
916 CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
917 const Twine &Name = "") {
918 return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
919 Name);
920 }
921
922 /// Create a call to the experimental.vector.extract intrinsic.
923 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
924 const Twine &Name = "") {
925 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
926 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
927 Name);
928 }
929
930 /// Create a call to the experimental.vector.insert intrinsic.
931 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
932 Value *Idx, const Twine &Name = "") {
933 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
934 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
935 nullptr, Name);
936 }
937
938private:
939 /// Create a call to a masked intrinsic with given Id.
940 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
941 ArrayRef<Type *> OverloadedTypes,
942 const Twine &Name = "");
943
944 Value *getCastedInt8PtrValue(Value *Ptr);
945
946 //===--------------------------------------------------------------------===//
947 // Instruction creation methods: Terminators
948 //===--------------------------------------------------------------------===//
949
950private:
951 /// Helper to add branch weight and unpredictable metadata onto an
952 /// instruction.
953 /// \returns The annotated instruction.
954 template <typename InstTy>
955 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
956 if (Weights)
957 I->setMetadata(LLVMContext::MD_prof, Weights);
958 if (Unpredictable)
959 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
960 return I;
961 }
962
963public:
964 /// Create a 'ret void' instruction.
965 ReturnInst *CreateRetVoid() {
966 return Insert(ReturnInst::Create(Context));
967 }
968
969 /// Create a 'ret <val>' instruction.
970 ReturnInst *CreateRet(Value *V) {
971 return Insert(ReturnInst::Create(Context, V));
972 }
973
974 /// Create a sequence of N insertvalue instructions,
975 /// with one Value from the retVals array each, that build an aggregate
976 /// return value one value at a time, and a ret instruction to return
977 /// the resulting aggregate value.
978 ///
979 /// This is a convenience function for code that uses aggregate return values
980 /// as a vehicle for having multiple return values.
981 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
982 Value *V = UndefValue::get(getCurrentFunctionReturnType());
983 for (unsigned i = 0; i != N; ++i)
984 V = CreateInsertValue(V, retVals[i], i, "mrv");
985 return Insert(ReturnInst::Create(Context, V));
986 }
987
988 /// Create an unconditional 'br label X' instruction.
989 BranchInst *CreateBr(BasicBlock *Dest) {
990 return Insert(BranchInst::Create(Dest));
991 }
992
993 /// Create a conditional 'br Cond, TrueDest, FalseDest'
994 /// instruction.
995 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
996 MDNode *BranchWeights = nullptr,
997 MDNode *Unpredictable = nullptr) {
998 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
999 BranchWeights, Unpredictable));
1000 }
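A hedged sketch of attaching branch-weight metadata through the BranchWeights parameter, using MDBuilder to build the !prof node (function and block names hypothetical):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MDBuilder.h"
using namespace llvm;

BranchInst *emitLikelyBranch(IRBuilderBase &B, Value *Cond,
                             BasicBlock *Likely, BasicBlock *Unlikely) {
  MDBuilder MDB(B.getContext());
  MDNode *Weights = MDB.createBranchWeights(1000, 1); // strongly favour 'Likely'
  return B.CreateCondBr(Cond, Likely, Unlikely, Weights);
}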
1001
1002 /// Create a conditional 'br Cond, TrueDest, FalseDest'
1003 /// instruction. Copy branch metadata if available.
1004 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1005 Instruction *MDSrc) {
1006 BranchInst *Br = BranchInst::Create(True, False, Cond);
1007 if (MDSrc) {
1008 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1009 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1010 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1011 }
1012 return Insert(Br);
1013 }
1014
1015 /// Create a switch instruction with the specified value, default dest,
1016 /// and with a hint for the number of cases that will be added (for efficient
1017 /// allocation).
1018 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1019 MDNode *BranchWeights = nullptr,
1020 MDNode *Unpredictable = nullptr) {
1021 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1022 BranchWeights, Unpredictable));
1023 }
1024
1025 /// Create an indirect branch instruction with the specified address
1026 /// operand, with an optional hint for the number of destinations that will be
1027 /// added (for efficient allocation).
1028 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1029 return Insert(IndirectBrInst::Create(Addr, NumDests));
1030 }
1031
1032 /// Create an invoke instruction.
1033 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1034 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1035 ArrayRef<Value *> Args,
1036 ArrayRef<OperandBundleDef> OpBundles,
1037 const Twine &Name = "") {
1038 InvokeInst *II =
1039 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1040 if (IsFPConstrained)
1041 setConstrainedFPCallAttr(II);
1042 return Insert(II, Name);
1043 }
1044 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1045 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1046 ArrayRef<Value *> Args = None,
1047 const Twine &Name = "") {
1048 InvokeInst *II =
1049 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1050 if (IsFPConstrained)
1051 setConstrainedFPCallAttr(II);
1052 return Insert(II, Name);
1053 }
1054
1055 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1056 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1057 ArrayRef<OperandBundleDef> OpBundles,
1058 const Twine &Name = "") {
1059 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1060 NormalDest, UnwindDest, Args, OpBundles, Name);
1061 }
1062
1063 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1064 BasicBlock *UnwindDest,
1065 ArrayRef<Value *> Args = None,
1066 const Twine &Name = "") {
1067 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1068 NormalDest, UnwindDest, Args, Name);
1069 }
1070
1071 /// \brief Create a callbr instruction.
1072 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1073 BasicBlock *DefaultDest,
1074 ArrayRef<BasicBlock *> IndirectDests,
1075 ArrayRef<Value *> Args = None,
1076 const Twine &Name = "") {
1077 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1078 Args), Name);
1079 }
1080 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1081 BasicBlock *DefaultDest,
1082 ArrayRef<BasicBlock *> IndirectDests,
1083 ArrayRef<Value *> Args,
1084 ArrayRef<OperandBundleDef> OpBundles,
1085 const Twine &Name = "") {
1086 return Insert(
1087 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1088 OpBundles), Name);
1089 }
1090
1091 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1092 ArrayRef<BasicBlock *> IndirectDests,
1093 ArrayRef<Value *> Args = None,
1094 const Twine &Name = "") {
1095 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1096 DefaultDest, IndirectDests, Args, Name);
1097 }
1098 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1099 ArrayRef<BasicBlock *> IndirectDests,
1100 ArrayRef<Value *> Args,
1101 ArrayRef<OperandBundleDef> OpBundles,
1102 const Twine &Name = "") {
1103 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1104 DefaultDest, IndirectDests, Args, OpBundles, Name);
1105 }
1106
1107 ResumeInst *CreateResume(Value *Exn) {
1108 return Insert(ResumeInst::Create(Exn));
1109 }
1110
1111 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1112 BasicBlock *UnwindBB = nullptr) {
1113 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1114 }
1115
1116 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1117 unsigned NumHandlers,
1118 const Twine &Name = "") {
1119 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1120 Name);
1121 }
1122
1123 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1124 const Twine &Name = "") {
1125 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1126 }
1127
1128 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1129 ArrayRef<Value *> Args = None,
1130 const Twine &Name = "") {
1131 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1132 }
1133
1134 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1135 return Insert(CatchReturnInst::Create(CatchPad, BB));
1136 }
1137
1138 UnreachableInst *CreateUnreachable() {
1139 return Insert(new UnreachableInst(Context));
1140 }
1141
1142 //===--------------------------------------------------------------------===//
1143 // Instruction creation methods: Binary Operators
1144 //===--------------------------------------------------------------------===//
1145private:
1146 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1147 Value *LHS, Value *RHS,
1148 const Twine &Name,
1149 bool HasNUW, bool HasNSW) {
1150 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1151 if (HasNUW) BO->setHasNoUnsignedWrap();
1152 if (HasNSW) BO->setHasNoSignedWrap();
1153 return BO;
1154 }
1155
1156 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1157 FastMathFlags FMF) const {
1158 if (!FPMD)
1159 FPMD = DefaultFPMathTag;
1160 if (FPMD)
1161 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1162 I->setFastMathFlags(FMF);
1163 return I;
1164 }
1165
1166 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1167 Value *R, const Twine &Name) const {
1168 auto *LC = dyn_cast<Constant>(L);
1169 auto *RC = dyn_cast<Constant>(R);
1170 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1171 }
1172
1173 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1174 RoundingMode UseRounding = DefaultConstrainedRounding;
1175
1176 if (Rounding.hasValue())
1177 UseRounding = Rounding.getValue();
1178
1179 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1180 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1181 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1182
1183 return MetadataAsValue::get(Context, RoundingMDS);
1184 }
1185
1186 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1187 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1188
1189 if (Except.hasValue())
1190 UseExcept = Except.getValue();
1191
1192 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1193 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1194 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1195
1196 return MetadataAsValue::get(Context, ExceptMDS);
1197 }
1198
1199 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1200 assert(CmpInst::isFPPredicate(Predicate) &&
1201 Predicate != CmpInst::FCMP_FALSE &&
1202 Predicate != CmpInst::FCMP_TRUE &&
1203 "Invalid constrained FP comparison predicate!");
1204
1205 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1206 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1207
1208 return MetadataAsValue::get(Context, PredicateMDS);
1209 }
1210
1211public:
1212 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1213 bool HasNUW = false, bool HasNSW = false) {
1214 if (auto *LC = dyn_cast<Constant>(LHS))
1215 if (auto *RC = dyn_cast<Constant>(RHS))
1216 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1217 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1218 HasNUW, HasNSW);
1219 }
1220
1221 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1222 return CreateAdd(LHS, RHS, Name, false, true);
1223 }
1224
1225 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1226 return CreateAdd(LHS, RHS, Name, true, false);
1227 }
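The NSW/NUW helpers preset the wrap flags on CreateAdd; a hedged one-liner (names hypothetical; Idx assumed i64):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *nextIndex(IRBuilderBase &B, Value *Idx) {
  return B.CreateNSWAdd(Idx, B.getInt64(1), "idx.next"); // add nsw i64 %Idx, 1
}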
1228
1229 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1230 bool HasNUW = false, bool HasNSW = false) {
1231 if (auto *LC = dyn_cast<Constant>(LHS))
1232 if (auto *RC = dyn_cast<Constant>(RHS))
1233 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1234 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1235 HasNUW, HasNSW);
1236 }
1237
1238 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1239 return CreateSub(LHS, RHS, Name, false, true);
1240 }
1241
1242 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1243 return CreateSub(LHS, RHS, Name, true, false);
1244 }
1245
1246 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1247 bool HasNUW = false, bool HasNSW = false) {
1248 if (auto *LC = dyn_cast<Constant>(LHS))
1249 if (auto *RC = dyn_cast<Constant>(RHS))
1250 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1251 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1252 HasNUW, HasNSW);
1253 }
1254
1255 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1256 return CreateMul(LHS, RHS, Name, false, true);
1257 }
1258
1259 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1260 return CreateMul(LHS, RHS, Name, true, false);
1261 }
1262
1263 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1264 bool isExact = false) {
1265 if (auto *LC = dyn_cast<Constant>(LHS))
1266 if (auto *RC = dyn_cast<Constant>(RHS))
1267 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1268 if (!isExact)
1269 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1270 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1271 }
1272
1273 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1274 return CreateUDiv(LHS, RHS, Name, true);
1275 }
1276
1277 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1278 bool isExact = false) {
1279 if (auto *LC = dyn_cast<Constant>(LHS))
1280 if (auto *RC = dyn_cast<Constant>(RHS))
1281 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1282 if (!isExact)
1283 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1284 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1285 }
1286
1287 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1288 return CreateSDiv(LHS, RHS, Name, true);
1289 }
1290
1291 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1292 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1293 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1294 }
1295
1296 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1297 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1298 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1299 }
1300
1301 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1302 bool HasNUW = false, bool HasNSW = false) {
1303 if (auto *LC = dyn_cast<Constant>(LHS))
1304 if (auto *RC = dyn_cast<Constant>(RHS))
1305 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1306 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1307 HasNUW, HasNSW);
1308 }
1309
1310 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1311 bool HasNUW = false, bool HasNSW = false) {
1312 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1313 HasNUW, HasNSW);
1314 }
1315
1316 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1317 bool HasNUW = false, bool HasNSW = false) {
1318 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1319 HasNUW, HasNSW);
1320 }
1321
1322 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1323 bool isExact = false) {
1324 if (auto *LC = dyn_cast<Constant>(LHS))
1325 if (auto *RC = dyn_cast<Constant>(RHS))
1326 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1327 if (!isExact)
1328 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1329 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1330 }
1331
1332 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1333 bool isExact = false) {
1334 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1335 }
1336
1337 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1338 bool isExact = false) {
1339 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1340 }
1341
1342 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1343 bool isExact = false) {
1344 if (auto *LC = dyn_cast<Constant>(LHS))
1345 if (auto *RC = dyn_cast<Constant>(RHS))
1346 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1347 if (!isExact)
1348 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1349 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1350 }
1351
1352 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1353 bool isExact = false) {
1354 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1355 }
1356
1357 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1358 bool isExact = false) {
1359 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
1360 }
1361
1362 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1363 if (auto *RC = dyn_cast<Constant>(RHS)) {
1364 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1365 return LHS; // LHS & -1 -> LHS
1366 if (auto *LC = dyn_cast<Constant>(LHS))
1367 return Insert(Folder.CreateAnd(LC, RC), Name);
1368 }
1369 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1370 }
1371
1372 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1373 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1374 }
1375
1376 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1377 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1378 }
1379
1380 Value *CreateAnd(ArrayRef<Value*> Ops) {
1381 assert(!Ops.empty());
1382 Value *Accum = Ops[0];
1383 for (unsigned i = 1; i < Ops.size(); i++)
1384 Accum = CreateAnd(Accum, Ops[i]);
1385 return Accum;
1386 }
1387
1388 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1389 if (auto *RC = dyn_cast<Constant>(RHS)) {
1390 if (RC->isNullValue())
1391 return LHS; // LHS | 0 -> LHS
1392 if (auto *LC = dyn_cast<Constant>(LHS))
1393 return Insert(Folder.CreateOr(LC, RC), Name);
1394 }
1395 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1396 }
1397
1398 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1399 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1400 }
1401
1402 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1403 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1404 }
1405
1406 Value *CreateOr(ArrayRef<Value*> Ops) {
1407 assert(!Ops.empty());
1408 Value *Accum = Ops[0];
1409 for (unsigned i = 1; i < Ops.size(); i++)
1410 Accum = CreateOr(Accum, Ops[i]);
1411 return Accum;
1412 }
1413
1414 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1415 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1416 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1417 }
1418
1419 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1420 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1421 }
1422
1423 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1424 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1425 }
1426
1427 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1428 MDNode *FPMD = nullptr) {
1429 if (IsFPConstrained)
1430 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1431 L, R, nullptr, Name, FPMD);
1432
1433 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1434 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1435 return Insert(I, Name);
1436 }
1437
1438 /// Copy fast-math-flags from an instruction rather than using the builder's
1439 /// default FMF.
1440 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1441 const Twine &Name = "") {
1442 if (IsFPConstrained)
1443 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1444 L, R, FMFSource, Name);
1445
1446 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1447 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1448 FMFSource->getFastMathFlags());
1449 return Insert(I, Name);
1450 }
1451
1452 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1453 MDNode *FPMD = nullptr) {
1454 if (IsFPConstrained)
1455 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1456 L, R, nullptr, Name, FPMD);
1457
1458 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1459 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1460 return Insert(I, Name);
1461 }
1462
1463 /// Copy fast-math-flags from an instruction rather than using the builder's
1464 /// default FMF.
1465 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1466 const Twine &Name = "") {
1467 if (IsFPConstrained)
1468 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1469 L, R, FMFSource, Name);
1470
1471 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1472 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1473 FMFSource->getFastMathFlags());
1474 return Insert(I, Name);
1475 }
1476
1477 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1478 MDNode *FPMD = nullptr) {
1479 if (IsFPConstrained)
1480 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1481 L, R, nullptr, Name, FPMD);
1482
1483 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1484 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1485 return Insert(I, Name);
1486 }
1487
1488 /// Copy fast-math-flags from an instruction rather than using the builder's
1489 /// default FMF.
1490 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1491 const Twine &Name = "") {
1492 if (IsFPConstrained)
1493 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1494 L, R, FMFSource, Name);
1495
1496 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1497 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1498 FMFSource->getFastMathFlags());
1499 return Insert(I, Name);
1500 }
1501
1502 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1503 MDNode *FPMD = nullptr) {
1504 if (IsFPConstrained)
1505 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1506 L, R, nullptr, Name, FPMD);
1507
1508 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1509 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1510 return Insert(I, Name);
1511 }
1512
1513 /// Copy fast-math-flags from an instruction rather than using the builder's
1514 /// default FMF.
1515 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1516 const Twine &Name = "") {
1517 if (IsFPConstrained)
1518 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1519 L, R, FMFSource, Name);
1520
1521 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1522 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1523 FMFSource->getFastMathFlags());
1524 return Insert(I, Name);
1525 }
1526
1527 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1528 MDNode *FPMD = nullptr) {
1529 if (IsFPConstrained)
1530 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1531 L, R, nullptr, Name, FPMD);
1532
1533 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1534 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1535 return Insert(I, Name);
1536 }
1537
1538 /// Copy fast-math-flags from an instruction rather than using the builder's
1539 /// default FMF.
1540 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1541 const Twine &Name = "") {
1542 if (IsFPConstrained)
1543 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1544 L, R, FMFSource, Name);
1545
1546 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1547 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1548 FMFSource->getFastMathFlags());
1549 return Insert(I, Name);
1550 }
1551
1552 Value *CreateBinOp(Instruction::BinaryOps Opc,
1553 Value *LHS, Value *RHS, const Twine &Name = "",
1554 MDNode *FPMathTag = nullptr) {
1555 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1556 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1557 if (isa<FPMathOperator>(BinOp))
1558 setFPAttrs(BinOp, FPMathTag, FMF);
1559 return Insert(BinOp, Name);
1560 }
1561
1562 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1563 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1564 return CreateSelect(Cond1, Cond2,
1565 ConstantInt::getNullValue(Cond2->getType()), Name);
1566 }
1567
1568 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1569 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1570 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1571 Cond2, Name);
1572 }
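CreateLogicalAnd and CreateLogicalOr lower to selects rather than and/or, so poison in the second operand does not leak when the first operand already decides the result. A hedged sketch (names hypothetical; both operands assumed i1):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *bothHold(IRBuilderBase &B, Value *A, Value *C) {
  return B.CreateLogicalAnd(A, C, "both"); // select i1 %A, i1 %C, i1 false
}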
1573
1574 CallInst *CreateConstrainedFPBinOp(
1575 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1576 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1577 Optional<RoundingMode> Rounding = None,
1578 Optional<fp::ExceptionBehavior> Except = None);
1579
1580 Value *CreateNeg(Value *V, const Twine &Name = "",
1581 bool HasNUW = false, bool HasNSW = false) {
1582 if (auto *VC = dyn_cast<Constant>(V))
1583 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1584 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1585 if (HasNUW) BO->setHasNoUnsignedWrap();
1586 if (HasNSW) BO->setHasNoSignedWrap();
1587 return BO;
1588 }
1589
1590 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1591 return CreateNeg(V, Name, false, true);
1592 }
1593
1594 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1595 return CreateNeg(V, Name, true, false);
1596 }
1597
1598 Value *CreateFNeg(Value *V, const Twine &Name = "",
1599 MDNode *FPMathTag = nullptr) {
1600 if (auto *VC = dyn_cast<Constant>(V))
1601 return Insert(Folder.CreateFNeg(VC), Name);
1602 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1603 Name);
1604 }
1605
1606 /// Copy fast-math-flags from an instruction rather than using the builder's
1607 /// default FMF.
1608 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1609 const Twine &Name = "") {
1610 if (auto *VC = dyn_cast<Constant>(V))
1611 return Insert(Folder.CreateFNeg(VC), Name);
1612 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1613 FMFSource->getFastMathFlags()),
1614 Name);
1615 }
1616
1617 Value *CreateNot(Value *V, const Twine &Name = "") {
1618 if (auto *VC = dyn_cast<Constant>(V))
1619 return Insert(Folder.CreateNot(VC), Name);
1620 return Insert(BinaryOperator::CreateNot(V), Name);
1621 }
1622
1623 Value *CreateUnOp(Instruction::UnaryOps Opc,
1624 Value *V, const Twine &Name = "",
1625 MDNode *FPMathTag = nullptr) {
1626 if (auto *VC = dyn_cast<Constant>(V))
1627 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1628 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1629 if (isa<FPMathOperator>(UnOp))
1630 setFPAttrs(UnOp, FPMathTag, FMF);
1631 return Insert(UnOp, Name);
1632 }
1633
1634 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1635 /// The correct number of operands must be passed accordingly.
1636 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1637 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1638
1639 //===--------------------------------------------------------------------===//
1640 // Instruction creation methods: Memory Instructions
1641 //===--------------------------------------------------------------------===//
1642
1643 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1644 Value *ArraySize = nullptr, const Twine &Name = "") {
1645 const DataLayout &DL = BB->getModule()->getDataLayout();
1646 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1647 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1648 }
1649
1650 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1651 const Twine &Name = "") {
1652 const DataLayout &DL = BB->getModule()->getDataLayout();
9
Called C++ object pointer is null
1653 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1654 unsigned AddrSpace = DL.getAllocaAddrSpace();
1655 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1656 }
1657
1658 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1659 /// converting the string to 'bool' for the isVolatile parameter.
1660 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1661 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1662 }
1663
1664 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1665 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1666 }
1667
1668 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1669 const Twine &Name = "") {
1670 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1671 }
1672
1673 // Deprecated [opaque pointer types]
1674   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1675                                                  const char *Name),
1676                             "Use the version that explicitly specifies the "
1677                             "loaded type instead") {
1678 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1679 }
1680
1681 // Deprecated [opaque pointer types]
1682   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1683                                                  const Twine &Name = ""),
1684                             "Use the version that explicitly specifies the "
1685                             "loaded type instead") {
1686 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1687 }
1688
1689 // Deprecated [opaque pointer types]
1690   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1691                                                  bool isVolatile,
1692                                                  const Twine &Name = ""),
1693                             "Use the version that explicitly specifies the "
1694                             "loaded type instead") {
1695 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1696 Name);
1697 }
1698
1699 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1700 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1701 }
1702
1703 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1704 const char *Name) {
1705 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1706 }
1707
1708 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1709 const Twine &Name = "") {
1710 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1711 }
1712
1713 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1714 bool isVolatile, const Twine &Name = "") {
1715 if (!Align) {
1716 const DataLayout &DL = BB->getModule()->getDataLayout();
1717 Align = DL.getABITypeAlign(Ty);
1718 }
1719 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1720 }
1721
1722 // Deprecated [opaque pointer types]
1723   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1724                                                          MaybeAlign Align,
1725                                                          const char *Name),
1726                             "Use the version that explicitly specifies the "
1727                             "loaded type instead") {
1728 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1729 Align, Name);
1730 }
1731 // Deprecated [opaque pointer types]
1732   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1733                                                          MaybeAlign Align,
1734                                                          const Twine &Name = ""),
1735                             "Use the version that explicitly specifies the "
1736                             "loaded type instead") {
1737 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1738 Align, Name);
1739 }
1740 // Deprecated [opaque pointer types]
1741   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1742                                                          MaybeAlign Align,
1743                                                          bool isVolatile,
1744                                                          const Twine &Name = ""),
1745                             "Use the version that explicitly specifies the "
1746                             "loaded type instead") {
1747 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1748 Align, isVolatile, Name);
1749 }
1750
1751 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1752 bool isVolatile = false) {
1753 if (!Align) {
1754 const DataLayout &DL = BB->getModule()->getDataLayout();
1755 Align = DL.getABITypeAlign(Val->getType());
1756 }
1757 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1758 }
1759 FenceInst *CreateFence(AtomicOrdering Ordering,
1760 SyncScope::ID SSID = SyncScope::System,
1761 const Twine &Name = "") {
1762 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1763 }
1764
1765 AtomicCmpXchgInst *
1766 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1767 AtomicOrdering SuccessOrdering,
1768 AtomicOrdering FailureOrdering,
1769 SyncScope::ID SSID = SyncScope::System) {
1770 if (!Align) {
1771 const DataLayout &DL = BB->getModule()->getDataLayout();
1772 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1773 }
1774
1775 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1776 FailureOrdering, SSID));
1777 }
1778
1779 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1780 Value *Val, MaybeAlign Align,
1781 AtomicOrdering Ordering,
1782 SyncScope::ID SSID = SyncScope::System) {
1783 if (!Align) {
1784 const DataLayout &DL = BB->getModule()->getDataLayout();
1785 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1786 }
1787
1788 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1789 }
1790
1791   LLVM_ATTRIBUTE_DEPRECATED(
1792       Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1793                        const Twine &Name = ""),
1794       "Use the version with explicit element type instead") {
1795 return CreateGEP(Ptr->getType()->getScalarType()->getPointerElementType(),
1796 Ptr, IdxList, Name);
1797 }
1798
1799 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1800 const Twine &Name = "") {
1801 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1802 // Every index must be constant.
1803 size_t i, e;
1804 for (i = 0, e = IdxList.size(); i != e; ++i)
1805 if (!isa<Constant>(IdxList[i]))
1806 break;
1807 if (i == e)
1808 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1809 }
1810 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1811 }
1812
1813   LLVM_ATTRIBUTE_DEPRECATED(
1814       Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1815                                const Twine &Name = ""),
1816       "Use the version with explicit element type instead") {
1817 return CreateInBoundsGEP(
1818 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1819 Name);
1820 }
1821
1822 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1823 const Twine &Name = "") {
1824 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1825 // Every index must be constant.
1826 size_t i, e;
1827 for (i = 0, e = IdxList.size(); i != e; ++i)
1828 if (!isa<Constant>(IdxList[i]))
1829 break;
1830 if (i == e)
1831 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1832 Name);
1833 }
1834 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1835 }
1836
1837 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1838 if (auto *PC = dyn_cast<Constant>(Ptr))
1839 if (auto *IC = dyn_cast<Constant>(Idx))
1840 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1841 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1842 }
1843
1844 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1845 const Twine &Name = "") {
1846 if (auto *PC = dyn_cast<Constant>(Ptr))
1847 if (auto *IC = dyn_cast<Constant>(Idx))
1848 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1849 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1850 }
1851
1852   LLVM_ATTRIBUTE_DEPRECATED(
1853       Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0,
1854                                 const Twine &Name = ""),
1855       "Use the version with explicit element type instead") {
1856 return CreateConstGEP1_32(
1857 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1858 Name);
1859 }
1860
1861 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1862 const Twine &Name = "") {
1863 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1864
1865 if (auto *PC = dyn_cast<Constant>(Ptr))
1866 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1867
1868 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1869 }
1870
1871 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1872 const Twine &Name = "") {
1873 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1874
1875 if (auto *PC = dyn_cast<Constant>(Ptr))
1876 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1877
1878 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1879 }
1880
1881 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1882 const Twine &Name = "") {
1883 Value *Idxs[] = {
1884 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1885 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1886 };
1887
1888 if (auto *PC = dyn_cast<Constant>(Ptr))
1889 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1890
1891 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1892 }
1893
1894 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1895 unsigned Idx1, const Twine &Name = "") {
1896 Value *Idxs[] = {
1897 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1898 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1899 };
1900
1901 if (auto *PC = dyn_cast<Constant>(Ptr))
1902 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1903
1904 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1905 }
1906
1907 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1908 const Twine &Name = "") {
1909 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1910
1911 if (auto *PC = dyn_cast<Constant>(Ptr))
1912 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1913
1914 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1915 }
1916
1917   LLVM_ATTRIBUTE_DEPRECATED(
1918       Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0,
1919                                 const Twine &Name = ""),
1920       "Use the version with explicit element type instead") {
1921 return CreateConstGEP1_64(
1922 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1923 Name);
1924 }
1925
1926 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1927 const Twine &Name = "") {
1928 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1929
1930 if (auto *PC = dyn_cast<Constant>(Ptr))
1931 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1932
1933 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1934 }
1935
1936   LLVM_ATTRIBUTE_DEPRECATED(
1937       Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1938                                         const Twine &Name = ""),
1939       "Use the version with explicit element type instead") {
1940 return CreateConstInBoundsGEP1_64(
1941 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1942 Name);
1943 }
1944
1945 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1946 const Twine &Name = "") {
1947 Value *Idxs[] = {
1948 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1949 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1950 };
1951
1952 if (auto *PC = dyn_cast<Constant>(Ptr))
1953 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1954
1955 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1956 }
1957
1958   LLVM_ATTRIBUTE_DEPRECATED(
1959       Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1960                                 const Twine &Name = ""),
1961       "Use the version with explicit element type instead") {
1962 return CreateConstGEP2_64(
1963 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1964 Idx1, Name);
1965 }
1966
1967 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1968 uint64_t Idx1, const Twine &Name = "") {
1969 Value *Idxs[] = {
1970 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1971 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1972 };
1973
1974 if (auto *PC = dyn_cast<Constant>(Ptr))
1975 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1976
1977 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1978 }
1979
1980   LLVM_ATTRIBUTE_DEPRECATED(
1981       Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0,
1982                                         uint64_t Idx1, const Twine &Name = ""),
1983       "Use the version with explicit element type instead") {
1984 return CreateConstInBoundsGEP2_64(
1985 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1986 Idx1, Name);
1987 }
1988
1989 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1990 const Twine &Name = "") {
1991 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1992 }
1993
1994   LLVM_ATTRIBUTE_DEPRECATED(
1995       Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = ""),
1996       "Use the version with explicit element type instead") {
1997 return CreateConstInBoundsGEP2_32(
1998 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, 0, Idx,
1999 Name);
2000 }
2001
2002 /// Same as CreateGlobalString, but return a pointer with "i8*" type
2003 /// instead of a pointer to array of i8.
2004 ///
2005 /// If no module is given via \p M, it is taken from the insertion point basic
2006 /// block.
2007 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
2008 unsigned AddressSpace = 0,
2009 Module *M = nullptr) {
2010 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
2011 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2012 Constant *Indices[] = {Zero, Zero};
2013 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
2014 Indices);
2015 }
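  // [Editorial example, not part of IRBuilder.h] Sketch of CreateGlobalStringPtr,
  // assuming an IRBuilder<> `B` positioned inside a function so the module can be
  // taken from the insertion-point block:
  //   Constant *Msg = B.CreateGlobalStringPtr("hello", "msg");
  //   // Msg has i8* type and can be passed directly to an i8*-taking callee.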
2016
2017 //===--------------------------------------------------------------------===//
2018 // Instruction creation methods: Cast/Conversion Operators
2019 //===--------------------------------------------------------------------===//
2020
2021 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2022 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2023 }
2024
2025 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2026 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2027 }
2028
2029 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2030 return CreateCast(Instruction::SExt, V, DestTy, Name);
2031 }
2032
2033 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2034 /// the value untouched if the type of V is already DestTy.
2035 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2036 const Twine &Name = "") {
2037     assert(V->getType()->isIntOrIntVectorTy() &&
2038            DestTy->isIntOrIntVectorTy() &&
2039            "Can only zero extend/truncate integers!");
2040 Type *VTy = V->getType();
2041 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2042 return CreateZExt(V, DestTy, Name);
2043 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2044 return CreateTrunc(V, DestTy, Name);
2045 return V;
2046 }
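  // [Editorial example, not part of IRBuilder.h] Normalizing an integer of
  // unknown width to i32 with CreateZExtOrTrunc; `B`, `Ctx`, and `V` are assumed.
  // The call is a no-op if V is already i32, a zext if narrower, a trunc if wider.
  //   Value *V32 = B.CreateZExtOrTrunc(V, Type::getInt32Ty(Ctx), "v32");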
2047
2048 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2049 /// the value untouched if the type of V is already DestTy.
2050 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2051 const Twine &Name = "") {
2052     assert(V->getType()->isIntOrIntVectorTy() &&
2053            DestTy->isIntOrIntVectorTy() &&
2054            "Can only sign extend/truncate integers!");
2055 Type *VTy = V->getType();
2056 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2057 return CreateSExt(V, DestTy, Name);
2058 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2059 return CreateTrunc(V, DestTy, Name);
2060 return V;
2061 }
2062
2063 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2064 if (IsFPConstrained)
2065 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2066 V, DestTy, nullptr, Name);
2067 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2068 }
2069
2070 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2071 if (IsFPConstrained)
2072 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2073 V, DestTy, nullptr, Name);
2074 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2075 }
2076
2077 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2078 if (IsFPConstrained)
2079 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2080 V, DestTy, nullptr, Name);
2081 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2082 }
2083
2084 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2085 if (IsFPConstrained)
2086 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2087 V, DestTy, nullptr, Name);
2088 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2089 }
2090
2091 Value *CreateFPTrunc(Value *V, Type *DestTy,
2092 const Twine &Name = "") {
2093 if (IsFPConstrained)
2094 return CreateConstrainedFPCast(
2095 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2096 Name);
2097 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2098 }
2099
2100 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2101 if (IsFPConstrained)
2102 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2103 V, DestTy, nullptr, Name);
2104 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2105 }
2106
2107 Value *CreatePtrToInt(Value *V, Type *DestTy,
2108 const Twine &Name = "") {
2109 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2110 }
2111
2112 Value *CreateIntToPtr(Value *V, Type *DestTy,
2113 const Twine &Name = "") {
2114 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2115 }
2116
2117 Value *CreateBitCast(Value *V, Type *DestTy,
2118 const Twine &Name = "") {
2119 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2120 }
2121
2122 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2123 const Twine &Name = "") {
2124 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2125 }
2126
2127 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2128 const Twine &Name = "") {
2129 if (V->getType() == DestTy)
2130 return V;
2131 if (auto *VC = dyn_cast<Constant>(V))
2132 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2133 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2134 }
2135
2136 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2137 const Twine &Name = "") {
2138 if (V->getType() == DestTy)
2139 return V;
2140 if (auto *VC = dyn_cast<Constant>(V))
2141 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2142 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2143 }
2144
2145 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2146 const Twine &Name = "") {
2147 if (V->getType() == DestTy)
2148 return V;
2149 if (auto *VC = dyn_cast<Constant>(V))
2150 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2151 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2152 }
2153
2154 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2155 const Twine &Name = "") {
2156 if (V->getType() == DestTy)
2157 return V;
2158 if (auto *VC = dyn_cast<Constant>(V))
2159 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2160 return Insert(CastInst::Create(Op, V, DestTy), Name);
2161 }
2162
2163 Value *CreatePointerCast(Value *V, Type *DestTy,
2164 const Twine &Name = "") {
2165 if (V->getType() == DestTy)
2166 return V;
2167 if (auto *VC = dyn_cast<Constant>(V))
2168 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2169 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2170 }
2171
2172 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2173 const Twine &Name = "") {
2174 if (V->getType() == DestTy)
2175 return V;
2176
2177 if (auto *VC = dyn_cast<Constant>(V)) {
2178 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2179 Name);
2180 }
2181
2182 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2183 Name);
2184 }
2185
2186 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2187 const Twine &Name = "") {
2188 if (V->getType() == DestTy)
2189 return V;
2190 if (auto *VC = dyn_cast<Constant>(V))
2191 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2192 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2193 }
2194
2195 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2196 const Twine &Name = "") {
2197 if (V->getType() == DestTy)
2198 return V;
2199 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2200 return CreatePtrToInt(V, DestTy, Name);
2201 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2202 return CreateIntToPtr(V, DestTy, Name);
2203
2204 return CreateBitCast(V, DestTy, Name);
2205 }
2206
2207 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2208 if (V->getType() == DestTy)
2209 return V;
2210 if (auto *VC = dyn_cast<Constant>(V))
2211 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2212 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2213 }
2214
2215 CallInst *CreateConstrainedFPCast(
2216 Intrinsic::ID ID, Value *V, Type *DestTy,
2217 Instruction *FMFSource = nullptr, const Twine &Name = "",
2218 MDNode *FPMathTag = nullptr,
2219 Optional<RoundingMode> Rounding = None,
2220 Optional<fp::ExceptionBehavior> Except = None);
2221
2222 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2223 // compile time error, instead of converting the string to bool for the
2224 // isSigned parameter.
2225 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2226
2227 //===--------------------------------------------------------------------===//
2228 // Instruction creation methods: Compare Instructions
2229 //===--------------------------------------------------------------------===//
2230
2231 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2232 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2233 }
2234
2235 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2236 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2237 }
2238
2239 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2240 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2241 }
2242
2243 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2244 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2245 }
2246
2247 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2248 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2249 }
2250
2251 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2252 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2253 }
2254
2255 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2256 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2257 }
2258
2259 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2260 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2261 }
2262
2263 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2264 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2265 }
2266
2267 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2268 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2269 }
2270
2271 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2272 MDNode *FPMathTag = nullptr) {
2273 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2274 }
2275
2276 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2277 MDNode *FPMathTag = nullptr) {
2278 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2279 }
2280
2281 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2282 MDNode *FPMathTag = nullptr) {
2283 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2284 }
2285
2286 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2287 MDNode *FPMathTag = nullptr) {
2288 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2289 }
2290
2291 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2292 MDNode *FPMathTag = nullptr) {
2293 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2294 }
2295
2296 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2297 MDNode *FPMathTag = nullptr) {
2298 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2299 }
2300
2301 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2302 MDNode *FPMathTag = nullptr) {
2303 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2304 }
2305
2306 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2307 MDNode *FPMathTag = nullptr) {
2308 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2309 }
2310
2311 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2312 MDNode *FPMathTag = nullptr) {
2313 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2314 }
2315
2316 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2317 MDNode *FPMathTag = nullptr) {
2318 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2319 }
2320
2321 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2322 MDNode *FPMathTag = nullptr) {
2323 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2324 }
2325
2326 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2327 MDNode *FPMathTag = nullptr) {
2328 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2329 }
2330
2331 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2332 MDNode *FPMathTag = nullptr) {
2333 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2334 }
2335
2336 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2337 MDNode *FPMathTag = nullptr) {
2338 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2339 }
2340
2341 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2342 const Twine &Name = "") {
2343 if (auto *LC = dyn_cast<Constant>(LHS))
2344 if (auto *RC = dyn_cast<Constant>(RHS))
2345 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2346 return Insert(new ICmpInst(P, LHS, RHS), Name);
2347 }
2348
2349 // Create a quiet floating-point comparison (i.e. one that raises an FP
2350 // exception only in the case where an input is a signaling NaN).
2351 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2352 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2353 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2354 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2355 }
2356
2357 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2358 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2359 return CmpInst::isFPPredicate(Pred)
2360 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2361 : CreateICmp(Pred, LHS, RHS, Name);
2362 }
2363
2364 // Create a signaling floating-point comparison (i.e. one that raises an FP
2365 // exception whenever an input is any NaN, signaling or quiet).
2366 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2367 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2368 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2369 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2370 }
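  // [Editorial example, not part of IRBuilder.h] Quiet vs. signaling comparisons
  // only diverge when the builder is in constrained-FP mode; `B`, `A`, and `C`
  // (floating-point Value*s) are assumed.
  //   B.setIsFPConstrained(true);
  //   Value *Quiet  = B.CreateFCmp(FCmpInst::FCMP_OLT, A, C);  // FP exception for sNaN only
  //   Value *Strict = B.CreateFCmpS(FCmpInst::FCMP_OLT, A, C); // FP exception for any NaN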
2371
2372private:
2373 // Helper routine to create either a signaling or a quiet FP comparison.
2374 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2375 const Twine &Name, MDNode *FPMathTag,
2376 bool IsSignaling);
2377
2378public:
2379 CallInst *CreateConstrainedFPCmp(
2380 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2381 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2382
2383 //===--------------------------------------------------------------------===//
2384 // Instruction creation methods: Other Instructions
2385 //===--------------------------------------------------------------------===//
2386
2387 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2388 const Twine &Name = "") {
2389 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2390 if (isa<FPMathOperator>(Phi))
2391 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2392 return Insert(Phi, Name);
2393 }
2394
2395 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2396 ArrayRef<Value *> Args = None, const Twine &Name = "",
2397 MDNode *FPMathTag = nullptr) {
2398 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2399 if (IsFPConstrained)
2400 setConstrainedFPCallAttr(CI);
2401 if (isa<FPMathOperator>(CI))
2402 setFPAttrs(CI, FPMathTag, FMF);
2403 return Insert(CI, Name);
2404 }
2405
2406 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2407 ArrayRef<OperandBundleDef> OpBundles,
2408 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2409 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2410 if (IsFPConstrained)
2411 setConstrainedFPCallAttr(CI);
2412 if (isa<FPMathOperator>(CI))
2413 setFPAttrs(CI, FPMathTag, FMF);
2414 return Insert(CI, Name);
2415 }
2416
2417 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2418 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2419 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2420 FPMathTag);
2421 }
2422
2423 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2424 ArrayRef<OperandBundleDef> OpBundles,
2425 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2426 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2427 OpBundles, Name, FPMathTag);
2428 }
2429
2430 CallInst *CreateConstrainedFPCall(
2431 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2432 Optional<RoundingMode> Rounding = None,
2433 Optional<fp::ExceptionBehavior> Except = None);
2434
2435 Value *CreateSelect(Value *C, Value *True, Value *False,
2436 const Twine &Name = "", Instruction *MDFrom = nullptr);
2437
2438 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2439 return Insert(new VAArgInst(List, Ty), Name);
2440 }
2441
2442 Value *CreateExtractElement(Value *Vec, Value *Idx,
2443 const Twine &Name = "") {
2444 if (auto *VC = dyn_cast<Constant>(Vec))
2445 if (auto *IC = dyn_cast<Constant>(Idx))
2446 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2447 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2448 }
2449
2450 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2451 const Twine &Name = "") {
2452 return CreateExtractElement(Vec, getInt64(Idx), Name);
2453 }
2454
2455 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2456 const Twine &Name = "") {
2457 if (auto *VC = dyn_cast<Constant>(Vec))
2458 if (auto *NC = dyn_cast<Constant>(NewElt))
2459 if (auto *IC = dyn_cast<Constant>(Idx))
2460 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2461 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2462 }
2463
2464 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2465 const Twine &Name = "") {
2466 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2467 }
2468
2469 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2470 const Twine &Name = "") {
2471 SmallVector<int, 16> IntMask;
2472 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2473 return CreateShuffleVector(V1, V2, IntMask, Name);
2474 }
2475
2476   LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2477                                                         ArrayRef<uint32_t> Mask,
2478                                                         const Twine &Name = ""),
2479                             "Pass indices as 'int' instead") {
2480 SmallVector<int, 16> IntMask;
2481 IntMask.assign(Mask.begin(), Mask.end());
2482 return CreateShuffleVector(V1, V2, IntMask, Name);
2483 }
2484
2485 /// See class ShuffleVectorInst for a description of the mask representation.
2486 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2487 const Twine &Name = "") {
2488 if (auto *V1C = dyn_cast<Constant>(V1))
2489 if (auto *V2C = dyn_cast<Constant>(V2))
2490 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2491 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2492 }
2493
2494 /// Create a unary shuffle. The second vector operand of the IR instruction
2495 /// is poison.
2496 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2497 const Twine &Name = "") {
2498 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2499 }
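  // [Editorial example, not part of IRBuilder.h] Reversing a <4 x i32> value with
  // the unary shuffle form; the second operand is implicitly poison. `B` and
  // `Vec` are assumed.
  //   Value *Rev = B.CreateShuffleVector(Vec, {3, 2, 1, 0}, "rev");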
2500
2501 Value *CreateExtractValue(Value *Agg,
2502 ArrayRef<unsigned> Idxs,
2503 const Twine &Name = "") {
2504 if (auto *AggC = dyn_cast<Constant>(Agg))
2505 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2506 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2507 }
2508
2509 Value *CreateInsertValue(Value *Agg, Value *Val,
2510 ArrayRef<unsigned> Idxs,
2511 const Twine &Name = "") {
2512 if (auto *AggC = dyn_cast<Constant>(Agg))
2513 if (auto *ValC = dyn_cast<Constant>(Val))
2514 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2515 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2516 }
2517
2518 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2519 const Twine &Name = "") {
2520 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2521 }
2522
2523 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2524 return Insert(new FreezeInst(V), Name);
2525 }
2526
2527 //===--------------------------------------------------------------------===//
2528 // Utility creation methods
2529 //===--------------------------------------------------------------------===//
2530
2531 /// Return an i1 value testing if \p Arg is null.
2532 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2533 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2534 Name);
2535 }
2536
2537 /// Return an i1 value testing if \p Arg is not null.
2538 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2539 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2540 Name);
2541 }
2542
2543 /// Return the i64 difference between two pointer values, dividing out
2544 /// the size of the pointed-to objects.
2545 ///
2546 /// This is intended to implement C-style pointer subtraction. As such, the
2547 /// pointers must be appropriately aligned for their element types and
2548 /// pointing into the same object.
2549 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
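  // [Editorial example, not part of IRBuilder.h] C-style pointer subtraction with
  // CreatePtrDiff; for two i32* values `P` and `Q` into the same array (both
  // assumed), the result is an i64 element count, not a byte count.
  //   Value *NumElts = B.CreatePtrDiff(P, Q, "nelts");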
2550
2551 /// Create a launder.invariant.group intrinsic call. If Ptr's type is
2552 /// different from pointer to i8, it is cast to pointer to i8 in the same
2553 /// address space before the call and cast back to Ptr's type after the call.
2554 Value *CreateLaunderInvariantGroup(Value *Ptr);
2555
2556 /// \brief Create a strip.invariant.group intrinsic call. If Ptr's type is
2557 /// different from pointer to i8, it is cast to pointer to i8 in the same
2558 /// address space before the call and cast back to Ptr's type after the call.
2559 Value *CreateStripInvariantGroup(Value *Ptr);
2560
2561 /// Return a vector value that contains the vector V reversed
2562 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2563
2564 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2565 /// return a shufflevector. If the immediate is positive, a vector is
2566 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2567 /// is negative, we extract -Imm elements from V1 and the remaining
2568 /// elements from V2. Imm is a signed integer in the range
2569 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2570 /// source/result vector)
2571 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2572 const Twine &Name = "");
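  // [Editorial example, not part of IRBuilder.h] For <4 x i32> operands
  // V1 = <a,b,c,d> and V2 = <e,f,g,h> (assumed), Imm = 1 extracts <b,c,d,e> from
  // concat(V1, V2), while Imm = -1 takes the last element of V1 and the rest
  // from V2, giving <d,e,f,g>.
  //   Value *Spliced = B.CreateVectorSplice(V1, V2, 1, "splice");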
2573
2574 /// Return a vector value that contains \arg V broadcasted to \p
2575 /// NumElts elements.
2576 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2577
2578 /// Return a vector value that contains \arg V broadcasted to \p
2579 /// EC elements.
2580 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2581
2582 /// Return a value that has been extracted from a larger integer type.
2583 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2584 IntegerType *ExtractedTy, uint64_t Offset,
2585 const Twine &Name);
2586
2587 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2588 unsigned Dimension, unsigned LastIndex,
2589 MDNode *DbgInfo);
2590
2591 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2592 MDNode *DbgInfo);
2593
2594 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2595 unsigned Index, unsigned FieldIndex,
2596 MDNode *DbgInfo);
2597
2598private:
2599 /// Helper function that creates an assume intrinsic call that
2600 /// represents an alignment assumption on the provided pointer \p PtrValue
2601 /// with offset \p OffsetValue and alignment value \p AlignValue.
2602 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2603 Value *PtrValue, Value *AlignValue,
2604 Value *OffsetValue);
2605
2606public:
2607 /// Create an assume intrinsic call that represents an alignment
2608 /// assumption on the provided pointer.
2609 ///
2610 /// An optional offset can be provided, and if it is provided, the offset
2611 /// must be subtracted from the provided pointer to get the pointer with the
2612 /// specified alignment.
2613 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2614 unsigned Alignment,
2615 Value *OffsetValue = nullptr);
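  // [Editorial example, not part of IRBuilder.h] Asserting to the optimizer that
  // a pointer is 16-byte aligned; `B`, `DL` (the module's DataLayout), and `Ptr`
  // are assumed.
  //   B.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16);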
2616
2617 /// Create an assume intrinsic call that represents an alignment
2618 /// assumption on the provided pointer.
2619 ///
2620 /// An optional offset can be provided, and if it is provided, the offset
2621 /// must be subtracted from the provided pointer to get the pointer with the
2622 /// specified alignment.
2623 ///
2624 /// This overload handles the condition where the Alignment is dependent
2625 /// on an existing value rather than a static value.
2626 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2627 Value *Alignment,
2628 Value *OffsetValue = nullptr);
2629};
2630
2631/// This provides a uniform API for creating instructions and inserting
2632/// them into a basic block: either at the end of a BasicBlock, or at a specific
2633/// iterator location in a block.
2634///
2635/// Note that the builder does not expose the full generality of LLVM
2636/// instructions. For access to extra instruction properties, use the mutators
2637/// (e.g. setVolatile) on the instructions after they have been
2638/// created. Convenience state exists to specify fast-math flags and fp-math
2639/// tags.
2640///
2641/// The first template argument specifies a class to use for creating constants.
2642/// This defaults to creating minimally folded constants. The second template
2643/// argument allows clients to specify custom insertion hooks that are called on
2644/// every newly created instruction.
2645template <typename FolderTy = ConstantFolder,
2646 typename InserterTy = IRBuilderDefaultInserter>
2647class IRBuilder : public IRBuilderBase {
2648private:
2649 FolderTy Folder;
2650 InserterTy Inserter;
2651
2652public:
2653 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2654 MDNode *FPMathTag = nullptr,
2655 ArrayRef<OperandBundleDef> OpBundles = None)
2656 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2657 Folder(Folder), Inserter(Inserter) {}
2658
2659 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2660 ArrayRef<OperandBundleDef> OpBundles = None)
2661 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2662
2663 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2664 MDNode *FPMathTag = nullptr,
2665 ArrayRef<OperandBundleDef> OpBundles = None)
2666 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2667 FPMathTag, OpBundles), Folder(Folder) {
2668 SetInsertPoint(TheBB);
2669 }
2670
2671 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2672 ArrayRef<OperandBundleDef> OpBundles = None)
2673 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2674 FPMathTag, OpBundles) {
2675 SetInsertPoint(TheBB);
2676 }
2677
2678 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2679 ArrayRef<OperandBundleDef> OpBundles = None)
2680 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2681 FPMathTag, OpBundles) {
2682 SetInsertPoint(IP);
2683 }
2684
2685 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2686 MDNode *FPMathTag = nullptr,
2687 ArrayRef<OperandBundleDef> OpBundles = None)
2688 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2689 FPMathTag, OpBundles), Folder(Folder) {
2690 SetInsertPoint(TheBB, IP);
2691 }
2692
2693 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2694 MDNode *FPMathTag = nullptr,
2695 ArrayRef<OperandBundleDef> OpBundles = None)
2696 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2697 FPMathTag, OpBundles) {
2698 SetInsertPoint(TheBB, IP);
2699 }
2700
2701 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2702 /// or FastMathFlagGuard instead.
2703 IRBuilder(const IRBuilder &) = delete;
2704
2705 InserterTy &getInserter() { return Inserter; }
2706};
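// [Editorial example, not part of IRBuilder.h] A minimal sketch of driving the
// concrete IRBuilder documented above; `F` is an assumed llvm::Function* whose
// entry block already exists and whose first two arguments are integers.
//   IRBuilder<> B(&F->getEntryBlock());            // insert at the end of entry
//   Value *Sum = B.CreateAdd(F->getArg(0), F->getArg(1), "sum");
//   B.CreateRet(Sum);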
2707
2708// Create wrappers for C Binding types (see CBindingWrapping.h).
2709DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2710
2711} // end namespace llvm
2712
2713#endif // LLVM_IR_IRBUILDER_H